author     Linus Torvalds <torvalds@linux-foundation.org>	2009-12-08 07:55:01 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>	2009-12-08 07:55:01 -0800
commit     d7fc02c7bae7b1cf69269992cf880a43a350cdaa (patch)
tree       a43d56fa72913a1cc98a0bbebe054d08581b3a7c /drivers
parent     ee1262dbc65ce0b6234a915d8432171e8d77f518 (diff)
parent     28b4d5cc17c20786848cdc07b7ea237a309776bb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1815 commits)
  mac80211: fix reorder buffer release
  iwmc3200wifi: Enable wimax core through module parameter
  iwmc3200wifi: Add wifi-wimax coexistence mode as a module parameter
  iwmc3200wifi: Coex table command does not expect a response
  iwmc3200wifi: Update wiwi priority table
  iwlwifi: driver version track kernel version
  iwlwifi: indicate uCode type when fail dump error/event log
  iwl3945: remove duplicated event logging code
  b43: fix two warnings
  ipw2100: fix rebooting hang with driver loaded
  cfg80211: indent regulatory messages with spaces
  iwmc3200wifi: fix NULL pointer dereference in pmkid update
  mac80211: Fix TX status reporting for injected data frames
  ath9k: enable 2GHz band only if the device supports it
  airo: Fix integer overflow warning
  rt2x00: Fix padding bug on L2PAD devices.
  WE: Fix set events not propagated
  b43legacy: avoid PPC fault during resume
  b43: avoid PPC fault during resume
  tcp: fix a timewait refcnt race
  ...

Fix up conflicts due to sysctl cleanups (dead sysctl_check code and
CTL_UNNUMBERED removed) in
	kernel/sysctl_check.c
	net/ipv4/sysctl_net_ipv4.c
	net/ipv6/addrconf.c
	net/sctp/sysctl.c
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/ambassador.c1
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/solos-pci.c32
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c1
-rw-r--r--drivers/bluetooth/btmrvl_drv.h1
-rw-r--r--drivers/bluetooth/btmrvl_main.c55
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/bluetooth/hci_vhci.c20
-rw-r--r--drivers/ieee802154/fakehard.c59
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c12
-rw-r--r--drivers/isdn/gigaset/Kconfig25
-rw-r--r--drivers/isdn/gigaset/Makefile5
-rw-r--r--drivers/isdn/gigaset/asyncdata.c662
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c76
-rw-r--r--drivers/isdn/gigaset/capi.c2292
-rw-r--r--drivers/isdn/gigaset/common.c139
-rw-r--r--drivers/isdn/gigaset/dummyll.c68
-rw-r--r--drivers/isdn/gigaset/ev-layer.c578
-rw-r--r--drivers/isdn/gigaset/gigaset.h176
-rw-r--r--drivers/isdn/gigaset/i4l.c563
-rw-r--r--drivers/isdn/gigaset/interface.c41
-rw-r--r--drivers/isdn/gigaset/isocdata.c186
-rw-r--r--drivers/isdn/gigaset/proc.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c56
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c71
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c1
-rw-r--r--drivers/isdn/mISDN/socket.c5
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c133
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c355
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h209
-rw-r--r--drivers/misc/iwmc3200top/log.c347
-rw-r--r--drivers/misc/iwmc3200top/log.h158
-rw-r--r--drivers/misc/iwmc3200top/main.c678
-rw-r--r--drivers/net/3c501.c10
-rw-r--r--drivers/net/3c503.c15
-rw-r--r--drivers/net/3c505.c2
-rw-r--r--drivers/net/3c507.c6
-rw-r--r--drivers/net/3c509.c12
-rw-r--r--drivers/net/3c515.c15
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c7
-rw-r--r--drivers/net/8139cp.c12
-rw-r--r--drivers/net/8139too.c15
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ipddp.c12
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arcnet/arc-rimi.c8
-rw-r--r--drivers/net/arcnet/arcnet.c14
-rw-r--r--drivers/net/arcnet/com20020.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c2
-rw-r--r--drivers/net/arm/ks8695net.c131
-rw-r--r--drivers/net/arm/w90p910_ether.c4
-rw-r--r--drivers/net/at1700.c18
-rw-r--r--drivers/net/atarilance.c4
-rw-r--r--drivers/net/atl1c/atl1c.h22
-rw-r--r--drivers/net/atl1c/atl1c_main.c88
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c17
-rw-r--r--drivers/net/atl1e/atl1e_main.c42
-rw-r--r--drivers/net/atlx/atl1.c21
-rw-r--r--drivers/net/atlx/atl2.c5
-rw-r--r--drivers/net/atp.c6
-rw-r--r--drivers/net/au1000_eth.c2
-rw-r--r--drivers/net/bcm63xx_enet.c5
-rw-r--r--drivers/net/benet/be.h24
-rw-r--r--drivers/net/benet/be_cmds.c496
-rw-r--r--drivers/net/benet/be_cmds.h152
-rw-r--r--drivers/net/benet/be_ethtool.c204
-rw-r--r--drivers/net/benet/be_hw.h9
-rw-r--r--drivers/net/benet/be_main.c257
-rw-r--r--drivers/net/bfin_mac.c12
-rw-r--r--drivers/net/bmac.c4
-rw-r--r--drivers/net/bnx2.c217
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x.h95
-rw-r--r--drivers/net/bnx2x_hsi.h21
-rw-r--r--drivers/net/bnx2x_link.c321
-rw-r--r--drivers/net/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x_main.c1443
-rw-r--r--drivers/net/bnx2x_reg.h23
-rw-r--r--drivers/net/bonding/bond_3ad.c123
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_ipv6.c7
-rw-r--r--drivers/net/bonding/bond_main.c316
-rw-r--r--drivers/net/bonding/bond_sysfs.c92
-rw-r--r--drivers/net/bonding/bonding.h35
-rw-r--r--drivers/net/can/Kconfig15
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c34
-rw-r--r--drivers/net/can/dev.c76
-rw-r--r--drivers/net/can/mcp251x.c1166
-rw-r--r--drivers/net/can/mscan/Kconfig23
-rw-r--r--drivers/net/can/mscan/Makefile5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c259
-rw-r--r--drivers/net/can/mscan/mscan.c668
-rw-r--r--drivers/net/can/mscan/mscan.h296
-rw-r--r--drivers/net/can/sja1000/sja1000.c19
-rw-r--r--drivers/net/can/sja1000/sja1000.h2
-rw-r--r--drivers/net/can/ti_hecc.c993
-rw-r--r--drivers/net/can/usb/ems_usb.c28
-rw-r--r--drivers/net/cnic.c1875
-rw-r--r--drivers/net/cnic.h64
-rw-r--r--drivers/net/cnic_defs.h1917
-rw-r--r--drivers/net/cnic_if.h14
-rw-r--r--drivers/net/cpmac.c6
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/adapter.h16
-rw-r--r--drivers/net/cxgb3/common.h8
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c73
-rw-r--r--drivers/net/cxgb3/sge.c31
-rw-r--r--drivers/net/davinci_emac.c30
-rw-r--r--drivers/net/declance.c4
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/dl2k.c24
-rw-r--r--drivers/net/dm9000.c149
-rw-r--r--drivers/net/dm9000.h7
-rw-r--r--drivers/net/e100.c25
-rw-r--r--drivers/net/e1000/e1000.h2
-rw-r--r--drivers/net/e1000/e1000_ethtool.c39
-rw-r--r--drivers/net/e1000/e1000_main.c130
-rw-r--r--drivers/net/e1000e/82571.c323
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h47
-rw-r--r--drivers/net/e1000e/es2lan.c213
-rw-r--r--drivers/net/e1000e/ethtool.c81
-rw-r--r--drivers/net/e1000e/hw.h52
-rw-r--r--drivers/net/e1000e/ich8lan.c530
-rw-r--r--drivers/net/e1000e/lib.c261
-rw-r--r--drivers/net/e1000e/netdev.c470
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/e1000e/phy.c516
-rw-r--r--drivers/net/e2100.c6
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/eexpress.c2
-rw-r--r--drivers/net/ehea/ehea_main.c29
-rw-r--r--drivers/net/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/enic/enic_main.c15
-rw-r--r--drivers/net/epic100.c10
-rw-r--r--drivers/net/ethoc.c6
-rw-r--r--drivers/net/fealnx.c14
-rw-r--r--drivers/net/fec_mpc52xx.c6
-rw-r--r--drivers/net/forcedeth.c6
-rw-r--r--drivers/net/fsl_pq_mdio.c67
-rw-r--r--drivers/net/fsl_pq_mdio.h11
-rw-r--r--drivers/net/gianfar.c1826
-rw-r--r--drivers/net/gianfar.h412
-rw-r--r--drivers/net/gianfar_ethtool.c376
-rw-r--r--drivers/net/gianfar_sysfs.c81
-rw-r--r--drivers/net/hamachi.c24
-rw-r--r--drivers/net/hamradio/6pack.c21
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/hamradio/bpqether.c9
-rw-r--r--drivers/net/hamradio/dmascc.c8
-rw-r--r--drivers/net/hamradio/mkiss.c21
-rw-r--r--drivers/net/hp-plus.c4
-rw-r--r--drivers/net/hp100.c4
-rw-r--r--drivers/net/ibm_newemac/core.c34
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/ifb.c10
-rw-r--r--drivers/net/igb/e1000_82575.c572
-rw-r--r--drivers/net/igb/e1000_82575.h32
-rw-r--r--drivers/net/igb/e1000_defines.h50
-rw-r--r--drivers/net/igb/e1000_hw.h22
-rw-r--r--drivers/net/igb/e1000_mac.c100
-rw-r--r--drivers/net/igb/e1000_mbx.c82
-rw-r--r--drivers/net/igb/e1000_mbx.h10
-rw-r--r--drivers/net/igb/e1000_nvm.c36
-rw-r--r--drivers/net/igb/e1000_phy.c453
-rw-r--r--drivers/net/igb/e1000_phy.h37
-rw-r--r--drivers/net/igb/e1000_regs.h80
-rw-r--r--drivers/net/igb/igb.h149
-rw-r--r--drivers/net/igb/igb_ethtool.c747
-rw-r--r--drivers/net/igb/igb_main.c3450
-rw-r--r--drivers/net/igbvf/ethtool.c25
-rw-r--r--drivers/net/igbvf/igbvf.h1
-rw-r--r--drivers/net/igbvf/netdev.c83
-rw-r--r--drivers/net/ipg.c9
-rw-r--r--drivers/net/irda/au1k_ir.c4
-rw-r--r--drivers/net/irda/irda-usb.c10
-rw-r--r--drivers/net/irda/stir4200.c12
-rw-r--r--drivers/net/irda/via-ircc.c16
-rw-r--r--drivers/net/irda/vlsi_ir.c16
-rw-r--r--drivers/net/isa-skeleton.c10
-rw-r--r--drivers/net/iseries_veth.c42
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c69
-rw-r--r--drivers/net/ixgb/ixgb_main.c118
-rw-r--r--drivers/net/ixgbe/ixgbe.h33
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c178
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c37
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c140
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c76
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c251
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h26
-rw-r--r--drivers/net/ixp2000/ixpdev.c3
-rw-r--r--drivers/net/jazzsonic.c6
-rw-r--r--drivers/net/jme.c22
-rw-r--r--drivers/net/korina.c13
-rw-r--r--drivers/net/ks8842.c5
-rw-r--r--drivers/net/ks8851.c4
-rw-r--r--drivers/net/lance.c14
-rw-r--r--drivers/net/lib82596.c13
-rw-r--r--drivers/net/lib8390.c12
-rw-r--r--drivers/net/ll_temac_main.c4
-rw-r--r--drivers/net/loopback.c10
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/mac89x0.c6
-rw-r--r--drivers/net/mace.c4
-rw-r--r--drivers/net/macsonic.c4
-rw-r--r--drivers/net/macvlan.c278
-rw-r--r--drivers/net/mdio.c12
-rw-r--r--drivers/net/mipsnet.c2
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c25
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c30
-rw-r--r--drivers/net/netx-eth.c5
-rw-r--r--drivers/net/netxen/netxen_nic.h78
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c38
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h76
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c995
-rw-r--r--drivers/net/netxen/netxen_nic_init.c361
-rw-r--r--drivers/net/netxen/netxen_nic_main.c334
-rw-r--r--drivers/net/ni5010.c2
-rw-r--r--drivers/net/ni52.c6
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c15
-rw-r--r--drivers/net/ns83820.c24
-rw-r--r--drivers/net/pasemi_mac.c4
-rw-r--r--drivers/net/pasemi_mac_ethtool.c14
-rw-r--r--drivers/net/pci-skeleton.c12
-rw-r--r--drivers/net/pcmcia/axnet_cs.c4
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c22
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c11
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c8
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c6
-rw-r--r--drivers/net/pcnet32.c22
-rw-r--r--drivers/net/phy/broadcom.c208
-rw-r--r--drivers/net/phy/phy.c18
-rw-r--r--drivers/net/plip.c13
-rw-r--r--drivers/net/ppp_async.c19
-rw-r--r--drivers/net/ppp_deflate.c44
-rw-r--r--drivers/net/ppp_generic.c98
-rw-r--r--drivers/net/ppp_mppe.c8
-rw-r--r--drivers/net/ppp_synctty.c9
-rw-r--r--drivers/net/pppoe.c63
-rw-r--r--drivers/net/pppol2tp.c62
-rw-r--r--drivers/net/pppox.c5
-rw-r--r--drivers/net/ps3_gelic_net.c98
-rw-r--r--drivers/net/ps3_gelic_net.h21
-rw-r--r--drivers/net/qla3xxx.c8
-rw-r--r--drivers/net/qlge/qlge.h229
-rw-r--r--drivers/net/qlge/qlge_dbg.c180
-rw-r--r--drivers/net/qlge/qlge_ethtool.c290
-rw-r--r--drivers/net/qlge/qlge_main.c490
-rw-r--r--drivers/net/qlge/qlge_mpi.c210
-rw-r--r--drivers/net/r6040.c5
-rw-r--r--drivers/net/r8169.c71
-rw-r--r--drivers/net/s6gmac.c8
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/sb1250-mac.c3
-rw-r--r--drivers/net/sc92031.c18
-rw-r--r--drivers/net/seeq8005.c4
-rw-r--r--drivers/net/sfc/Kconfig13
-rw-r--r--drivers/net/sfc/Makefile7
-rw-r--r--drivers/net/sfc/bitfield.h17
-rw-r--r--drivers/net/sfc/boards.c328
-rw-r--r--drivers/net/sfc/boards.h28
-rw-r--r--drivers/net/sfc/efx.c793
-rw-r--r--drivers/net/sfc/efx.h65
-rw-r--r--drivers/net/sfc/enum.h116
-rw-r--r--drivers/net/sfc/ethtool.c222
-rw-r--r--drivers/net/sfc/ethtool.h27
-rw-r--r--drivers/net/sfc/falcon.c2829
-rw-r--r--drivers/net/sfc/falcon.h145
-rw-r--r--drivers/net/sfc/falcon_boards.c752
-rw-r--r--drivers/net/sfc/falcon_gmac.c123
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h1333
-rw-r--r--drivers/net/sfc/falcon_io.h258
-rw-r--r--drivers/net/sfc/falcon_xmac.c278
-rw-r--r--drivers/net/sfc/gmii.h60
-rw-r--r--drivers/net/sfc/io.h256
-rw-r--r--drivers/net/sfc/mac.h6
-rw-r--r--drivers/net/sfc/mcdi.c1112
-rw-r--r--drivers/net/sfc/mcdi.h130
-rw-r--r--drivers/net/sfc/mcdi_mac.c152
-rw-r--r--drivers/net/sfc/mcdi_pcol.h1578
-rw-r--r--drivers/net/sfc/mcdi_phy.c597
-rw-r--r--drivers/net/sfc/mdio_10g.c144
-rw-r--r--drivers/net/sfc/mdio_10g.h6
-rw-r--r--drivers/net/sfc/mtd.c559
-rw-r--r--drivers/net/sfc/net_driver.h302
-rw-r--r--drivers/net/sfc/nic.c1583
-rw-r--r--drivers/net/sfc/nic.h261
-rw-r--r--drivers/net/sfc/phy.h27
-rw-r--r--drivers/net/sfc/qt202x_phy.c (renamed from drivers/net/sfc/xfp_phy.c)132
-rw-r--r--drivers/net/sfc/regs.h3168
-rw-r--r--drivers/net/sfc/rx.c82
-rw-r--r--drivers/net/sfc/rx.h26
-rw-r--r--drivers/net/sfc/selftest.c146
-rw-r--r--drivers/net/sfc/sfe4001.c435
-rw-r--r--drivers/net/sfc/siena.c604
-rw-r--r--drivers/net/sfc/spi.h18
-rw-r--r--drivers/net/sfc/tenxpress.c223
-rw-r--r--drivers/net/sfc/tx.c184
-rw-r--r--drivers/net/sfc/tx.h25
-rw-r--r--drivers/net/sfc/workarounds.h20
-rw-r--r--drivers/net/sgiseeq.c7
-rw-r--r--drivers/net/sh_eth.c56
-rw-r--r--drivers/net/sh_eth.h1
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skge.c32
-rw-r--r--drivers/net/sky2.c157
-rw-r--r--drivers/net/sky2.h185
-rw-r--r--drivers/net/slip.c33
-rw-r--r--drivers/net/smc-mca.c6
-rw-r--r--drivers/net/smc911x.c2
-rw-r--r--drivers/net/smc9194.c2
-rw-r--r--drivers/net/smc91x.c20
-rw-r--r--drivers/net/smc91x.h10
-rw-r--r--drivers/net/smsc911x.c7
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c1
-rw-r--r--drivers/net/starfire.c10
-rw-r--r--drivers/net/stmmac/stmmac_main.c5
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sunbmac.c2
-rw-r--r--drivers/net/sundance.c18
-rw-r--r--drivers/net/sungem.c4
-rw-r--r--drivers/net/sungem.h4
-rw-r--r--drivers/net/sunhme.c27
-rw-r--r--drivers/net/sunlance.c2
-rw-r--r--drivers/net/sunqe.c2
-rw-r--r--drivers/net/tc35815.c292
-rw-r--r--drivers/net/tehuti.c45
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c1254
-rw-r--r--drivers/net/tg3.h99
-rw-r--r--drivers/net/tlan.c11
-rw-r--r--drivers/net/tokenring/3c359.c3
-rw-r--r--drivers/net/tokenring/ibmtr.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c10
-rw-r--r--drivers/net/tokenring/olympic.c4
-rw-r--r--drivers/net/tokenring/smctr.c86
-rw-r--r--drivers/net/tokenring/tms380tr.c26
-rw-r--r--drivers/net/tsi108_eth.c10
-rw-r--r--drivers/net/tulip/21142.c8
-rw-r--r--drivers/net/tulip/de2104x.c6
-rw-r--r--drivers/net/tulip/dmfe.c2
-rw-r--r--drivers/net/tulip/eeprom.c8
-rw-r--r--drivers/net/tulip/interrupt.c16
-rw-r--r--drivers/net/tulip/media.c4
-rw-r--r--drivers/net/tulip/pnic2.c6
-rw-r--r--drivers/net/tulip/tulip_core.c26
-rw-r--r--drivers/net/tulip/uli526x.c2
-rw-r--r--drivers/net/tulip/winbond-840.c10
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/tun.c58
-rw-r--r--drivers/net/typhoon.c6
-rw-r--r--drivers/net/ucc_geth.c24
-rw-r--r--drivers/net/usb/asix.c24
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc-phonet.c8
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ether.c79
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/hso.c34
-rw-r--r--drivers/net/usb/kaweth.c11
-rw-r--r--drivers/net/usb/mcs7830.c4
-rw-r--r--drivers/net/usb/rndis_host.c10
-rw-r--r--drivers/net/usb/usbnet.c218
-rw-r--r--drivers/net/usb/zaurus.c4
-rw-r--r--drivers/net/veth.c38
-rw-r--r--drivers/net/via-rhine.c20
-rw-r--r--drivers/net/via-velocity.c405
-rw-r--r--drivers/net/via-velocity.h15
-rw-r--r--drivers/net/virtio_net.c17
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h246
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c363
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h17
-rw-r--r--drivers/net/vxge/vxge-config.c300
-rw-r--r--drivers/net/vxge/vxge-config.h2
-rw-r--r--drivers/net/vxge/vxge-main.c115
-rw-r--r--drivers/net/vxge/vxge-main.h1
-rw-r--r--drivers/net/vxge/vxge-reg.h4
-rw-r--r--drivers/net/vxge/vxge-traffic.c24
-rw-r--r--drivers/net/vxge/vxge-traffic.h2
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cosa.c20
-rw-r--r--drivers/net/wan/dlci.c14
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c12
-rw-r--r--drivers/net/wan/hdlc.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c6
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/n2.c2
-rw-r--r--drivers/net/wan/pc300_drv.c17
-rw-r--r--drivers/net/wan/sbni.c28
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/sealevel.c13
-rw-r--r--drivers/net/wan/x25_asy.c23
-rw-r--r--drivers/net/wimax/i2400m/Kconfig8
-rw-r--r--drivers/net/wimax/i2400m/control.c16
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c500
-rw-r--r--drivers/net/wimax/i2400m/fw.c886
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h16
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h16
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h209
-rw-r--r--drivers/net/wimax/i2400m/netdev.c127
-rw-r--r--drivers/net/wimax/i2400m/rx.c170
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c11
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c42
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c5
-rw-r--r--drivers/net/wimax/i2400m/sdio.c205
-rw-r--r--drivers/net/wimax/i2400m/tx.c20
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c37
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c35
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c60
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c61
-rw-r--r--drivers/net/wimax/i2400m/usb.c189
-rw-r--r--drivers/net/wireless/Kconfig212
-rw-r--r--drivers/net/wireless/Makefile10
-rw-r--r--drivers/net/wireless/adm8211.c2
-rw-r--r--drivers/net/wireless/airo.c5
-rw-r--r--drivers/net/wireless/at76c50x-usb.c55
-rw-r--r--drivers/net/wireless/ath/Kconfig9
-rw-r--r--drivers/net/wireless/ath/Makefile9
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h6
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c3
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h6
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c15
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c50
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c99
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c16
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath.h69
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h53
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c33
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c140
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c193
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c191
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h19
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig18
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile32
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c141
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h215
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c383
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c421
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c299
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h127
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c72
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c94
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c97
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c183
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1344
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h98
-rw-r--r--drivers/net/wireless/ath/ath9k/initvals.h101
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c200
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h26
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1413
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c1034
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c366
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c110
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c354
-rw-r--r--drivers/net/wireless/ath/debug.c32
-rw-r--r--drivers/net/wireless/ath/debug.h77
-rw-r--r--drivers/net/wireless/ath/hw.c126
-rw-r--r--drivers/net/wireless/ath/reg.h27
-rw-r--r--drivers/net/wireless/ath/regd.c5
-rw-r--r--drivers/net/wireless/ath/regd.h8
-rw-r--r--drivers/net/wireless/ath/regd_common.h32
-rw-r--r--drivers/net/wireless/atmel.c16
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/b43.h18
-rw-r--r--drivers/net/wireless/b43/dma.c312
-rw-r--r--drivers/net/wireless/b43/dma.h13
-rw-r--r--drivers/net/wireless/b43/leds.c1
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/phy_lp.c777
-rw-r--r--drivers/net/wireless/b43/phy_lp.h11
-rw-r--r--drivers/net/wireless/b43/pio.c85
-rw-r--r--drivers/net/wireless/b43/rfkill.c10
-rw-r--r--drivers/net/wireless/b43/xmit.c8
-rw-r--r--drivers/net/wireless/b43/xmit.h19
-rw-r--r--drivers/net/wireless/b43legacy/Kconfig2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c17
-rw-r--r--drivers/net/wireless/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c7
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/Kconfig3
-rw-r--r--drivers/net/wireless/i82593.h229
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c147
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c159
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h8
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c71
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c371
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c287
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c255
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c415
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c305
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c584
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c776
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h297
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c691
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h86
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h189
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c860
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h150
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h197
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c133
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c324
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c232
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c218
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c160
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c155
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c444
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c82
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c108
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h93
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c23
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c50
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h29
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h8
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c92
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c150
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c13
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c66
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h41
-rw-r--r--drivers/net/wireless/libertas/11d.c696
-rw-r--r--drivers/net/wireless/libertas/11d.h105
-rw-r--r--drivers/net/wireless/libertas/Kconfig39
-rw-r--r--drivers/net/wireless/libertas/Makefile14
-rw-r--r--drivers/net/wireless/libertas/README26
-rw-r--r--drivers/net/wireless/libertas/assoc.c445
-rw-r--r--drivers/net/wireless/libertas/assoc.h141
-rw-r--r--drivers/net/wireless/libertas/cfg.c198
-rw-r--r--drivers/net/wireless/libertas/cfg.h16
-rw-r--r--drivers/net/wireless/libertas/cmd.c695
-rw-r--r--drivers/net/wireless/libertas/cmd.h127
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c116
-rw-r--r--drivers/net/wireless/libertas/debugfs.c27
-rw-r--r--drivers/net/wireless/libertas/decl.h65
-rw-r--r--drivers/net/wireless/libertas/defs.h3
-rw-r--r--drivers/net/wireless/libertas/dev.h431
-rw-r--r--drivers/net/wireless/libertas/ethtool.c84
-rw-r--r--drivers/net/wireless/libertas/host.h959
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h800
-rw-r--r--drivers/net/wireless/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c62
-rw-r--r--drivers/net/wireless/libertas/if_sdio.h3
-rw-r--r--drivers/net/wireless/libertas/if_spi.c143
-rw-r--r--drivers/net/wireless/libertas/if_usb.c5
-rw-r--r--drivers/net/wireless/libertas/main.c720
-rw-r--r--drivers/net/wireless/libertas/mesh.c1141
-rw-r--r--drivers/net/wireless/libertas/mesh.h78
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c453
-rw-r--r--drivers/net/wireless/libertas/rx.c13
-rw-r--r--drivers/net/wireless/libertas/scan.c250
-rw-r--r--drivers/net/wireless/libertas/scan.h30
-rw-r--r--drivers/net/wireless/libertas/tx.c9
-rw-r--r--drivers/net/wireless/libertas/types.h4
-rw-r--r--drivers/net/wireless/libertas/wext.c196
-rw-r--r--drivers/net/wireless/libertas/wext.h9
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c85
-rw-r--r--drivers/net/wireless/mwl8k.c1265
-rw-r--r--drivers/net/wireless/orinoco/Kconfig6
-rw-r--r--drivers/net/wireless/orinoco/fw.c6
-rw-r--r--drivers/net/wireless/orinoco/hw.c33
-rw-r--r--drivers/net/wireless/orinoco/hw.h3
-rw-r--r--drivers/net/wireless/orinoco/main.c34
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h1
-rw-r--r--drivers/net/wireless/p54/Kconfig2
-rw-r--r--drivers/net/wireless/p54/eeprom.c31
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/ray_cs.c36
-rw-r--r--drivers/net/wireless/rndis_wlan.c13
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig46
-rw-r--r--drivers/net/wireless/rt2x00/Makefile3
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c32
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c166
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h1852
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2284
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h151
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1322
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h159
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2286
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h1864
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h73
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h20
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c165
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h21
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c65
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c29
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c13
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h36
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c55
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_event.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c171
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_netlink.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c44
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h95
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c369
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h586
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c218
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c503
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h190
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h919
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c121
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c141
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h83
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c979
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c88
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c311
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h65
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c76
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h18
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h4
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zd1201.c3
-rw-r--r--drivers/net/wireless/zd1211rw/Kconfig2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h18
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c202
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h25
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c11
-rw-r--r--drivers/net/xilinx_emaclite.c2
-rw-r--r--drivers/net/xtsonic.c2
-rw-r--r--drivers/net/yellowfin.c10
-rw-r--r--drivers/net/znet.c9
-rw-r--r--drivers/parisc/led.c7
-rw-r--r--drivers/s390/net/Makefile6
-rw-r--r--drivers/s390/net/claw.c82
-rw-r--r--drivers/s390/net/claw.h12
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_main.c168
-rw-r--r--drivers/s390/net/ctcm_main.h20
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/ctcm_sysfs.c11
-rw-r--r--drivers/s390/net/cu3088.c148
-rw-r--r--drivers/s390/net/cu3088.h41
-rw-r--r--drivers/s390/net/fsm.c1
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/net/lcs.c115
-rw-r--r--drivers/s390/net/lcs.h18
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h8
-rw-r--r--drivers/s390/net/qeth_core_main.c225
-rw-r--r--drivers/s390/net/qeth_core_mpc.h45
-rw-r--r--drivers/s390/net/qeth_core_sys.c83
-rw-r--r--drivers/s390/net/qeth_l2_main.c33
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c144
-rw-r--r--drivers/s390/net/qeth_l3_sys.c67
-rw-r--r--drivers/ssb/driver_pcicore.c4
-rw-r--r--drivers/ssb/main.c126
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/ssb/sprom.c30
-rw-r--r--drivers/ssb/ssb_private.h12
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/arlan/Kconfig15
-rw-r--r--drivers/staging/arlan/Makefile3
-rw-r--r--drivers/staging/arlan/TODO7
-rw-r--r--drivers/staging/arlan/arlan-main.c (renamed from drivers/net/wireless/arlan-main.c)0
-rw-r--r--drivers/staging/arlan/arlan-proc.c (renamed from drivers/net/wireless/arlan-proc.c)0
-rw-r--r--drivers/staging/arlan/arlan.h (renamed from drivers/net/wireless/arlan.h)0
-rw-r--r--drivers/staging/netwave/Kconfig11
-rw-r--r--drivers/staging/netwave/Makefile1
-rw-r--r--drivers/staging/netwave/TODO7
-rw-r--r--drivers/staging/netwave/netwave_cs.c (renamed from drivers/net/wireless/netwave_cs.c)0
-rw-r--r--drivers/staging/rtl8187se/Kconfig3
-rw-r--r--drivers/staging/rtl8192e/Kconfig3
-rw-r--r--drivers/staging/strip/Kconfig22
-rw-r--r--drivers/staging/strip/Makefile1
-rw-r--r--drivers/staging/strip/TODO7
-rw-r--r--drivers/staging/strip/strip.c (renamed from drivers/net/wireless/strip.c)17
-rw-r--r--drivers/staging/vt6655/Kconfig4
-rw-r--r--drivers/staging/vt6656/Kconfig4
-rw-r--r--drivers/staging/wavelan/Kconfig38
-rw-r--r--drivers/staging/wavelan/Makefile2
-rw-r--r--drivers/staging/wavelan/TODO7
-rw-r--r--drivers/staging/wavelan/i82586.h (renamed from drivers/net/wireless/i82586.h)0
-rw-r--r--drivers/staging/wavelan/wavelan.c (renamed from drivers/net/wireless/wavelan.c)0
-rw-r--r--drivers/staging/wavelan/wavelan.h (renamed from drivers/net/wireless/wavelan.h)0
-rw-r--r--drivers/staging/wavelan/wavelan.p.h (renamed from drivers/net/wireless/wavelan.p.h)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c (renamed from drivers/net/wireless/wavelan_cs.c)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.h (renamed from drivers/net/wireless/wavelan_cs.h)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.p.h (renamed from drivers/net/wireless/wavelan_cs.p.h)2
798 files changed, 74540 insertions, 36922 deletions
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 66e181345b3a..8af23411743c 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2351,6 +2351,7 @@ static void __init amb_check_args (void) {
 MODULE_AUTHOR(maintainer_string);
 MODULE_DESCRIPTION(description_string);
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("atmsar11.fw");
 module_param(debug,   ushort, 0644);
 module_param(cmds,    uint, 0);
 module_param(txs,     uint, 0);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f766cc46b4c4..bc53fed89b1e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2906,8 +2906,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
 	u32 oc3_index;
 
-	if ((media_index < 0) || (media_index > 4))
-	    media_index = 5;
+	if (media_index > 4)
+		media_index = 5;
 	
 	switch (fore200e->loop_mode) {
 	    case ATM_LM_NONE:    oc3_index = 0;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 70667033a568..e90665876c47 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2739,7 +2739,7 @@ he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
 			spin_lock_irqsave(&he_dev->global_lock, flags);
 			switch (reg.type) {
 				case HE_REGTYPE_PCI:
-					if (reg.addr < 0 || reg.addr >= HE_REGMAP_SIZE) {
+					if (reg.addr >= HE_REGMAP_SIZE) {
 						err = -EINVAL;
 						break;
 					}
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c5f5186d62a3..51eed679a059 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -142,6 +142,9 @@ MODULE_AUTHOR("Traverse Technologies <support@traverse.com.au>");
 MODULE_DESCRIPTION("Solos PCI driver");
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("solos-FPGA.bin");
+MODULE_FIRMWARE("solos-Firmware.bin");
+MODULE_FIRMWARE("solos-db-FPGA.bin");
 MODULE_PARM_DESC(reset, "Reset Solos chips on startup");
 MODULE_PARM_DESC(atmdebug, "Print ATM data");
 MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade");
@@ -528,34 +531,37 @@ static int flash_upgrade(struct solos_card *card, int chip)
 	int numblocks = 0;
 	int offset;
 
-	if (chip == 0) {
+	switch (chip) {
+	case 0:
 		fw_name = "solos-FPGA.bin";
 		blocksize = FPGA_BLOCK;
-	} 
-	
-	if (chip == 1) {
+		break;
+	case 1:
 		fw_name = "solos-Firmware.bin";
 		blocksize = SOLOS_BLOCK;
-	}
-	
-	if (chip == 2){
+		break;
+	case 2:
 		if (card->fpga_version > LEGACY_BUFFERS){
 			fw_name = "solos-db-FPGA.bin";
 			blocksize = FPGA_BLOCK;
 		} else {
-			dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+			dev_info(&card->dev->dev, "FPGA version doesn't support"
+					" daughter board upgrades\n");
 			return -EPERM;
 		}
-	}
-	
-	if (chip == 3){
+		break;
+	case 3:
 		if (card->fpga_version > LEGACY_BUFFERS){
 			fw_name = "solos-Firmware.bin";
 			blocksize = SOLOS_BLOCK;
 		} else {
-		dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
-		return -EPERM;
+			dev_info(&card->dev->dev, "FPGA version doesn't support"
+					" daughter board upgrades\n");
+			return -EPERM;
 		}
+		break;
+	default:
+		return -ENODEV;
 	}
 
 	if (request_firmware(&fw, fw_name, &card->dev->dev))
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 4617bd12f63b..d43b5cb864ef 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -29,7 +29,6 @@ struct btmrvl_debugfs_data {
 	struct dentry *root_dir, *config_dir, *status_dir;
 
 	/* config */
-	struct dentry *drvdbg;
 	struct dentry *psmode;
 	struct dentry *pscmd;
 	struct dentry *hsmode;
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 411c7a77082d..523d197b9824 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -131,6 +131,7 @@ void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
 int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_enable_ps(struct btmrvl_private *priv);
 int btmrvl_prepare_command(struct btmrvl_private *priv);
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index e605563b4eaa..f97771ce432c 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -189,6 +189,38 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
 }
 EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
 
+int btmrvl_enable_ps(struct btmrvl_private *priv)
+{
+	struct sk_buff *skb;
+	struct btmrvl_cmd *cmd;
+
+	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+	if (skb == NULL) {
+		BT_ERR("No free skb");
+		return -ENOMEM;
+	}
+
+	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
+	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
+					BT_CMD_AUTO_SLEEP_MODE));
+	cmd->length = 1;
+
+	if (priv->btmrvl_dev.psmode)
+		cmd->data[0] = BT_PS_ENABLE;
+	else
+		cmd->data[0] = BT_PS_DISABLE;
+
+	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+
+	skb->dev = (void *) priv->btmrvl_dev.hcidev;
+	skb_queue_head(&priv->adapter->tx_queue, skb);
+
+	BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
+
 static int btmrvl_enable_hs(struct btmrvl_private *priv)
 {
 	struct sk_buff *skb;
@@ -258,28 +290,7 @@ int btmrvl_prepare_command(struct btmrvl_private *priv)
 
 	if (priv->btmrvl_dev.pscmd) {
 		priv->btmrvl_dev.pscmd = 0;
-
-		skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-		if (skb == NULL) {
-			BT_ERR("No free skb");
-			return -ENOMEM;
-		}
-
-		cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-		cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_AUTO_SLEEP_MODE));
-		cmd->length = 1;
-
-		if (priv->btmrvl_dev.psmode)
-			cmd->data[0] = BT_PS_ENABLE;
-		else
-			cmd->data[0] = BT_PS_DISABLE;
-
-		bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
-		skb->dev = (void *) priv->btmrvl_dev.hcidev;
-		skb_queue_head(&priv->adapter->tx_queue, skb);
-
-		BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+		btmrvl_enable_ps(priv);
 	}
 
 	if (priv->btmrvl_dev.hscmd) {
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 5b33b85790f2..1e6eb1aeba2b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -930,6 +930,8 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 	priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;
 
 	btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+	priv->btmrvl_dev.psmode = 1;
+	btmrvl_enable_ps(priv);
 
 	return 0;
 
@@ -1001,3 +1003,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE("sd8688_helper.bin");
+MODULE_FIRMWARE("sd8688.bin");
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index d5cde6d86f89..7595274103fd 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,8 +41,6 @@
 
 #define VERSION "1.3"
 
-static int minor = MISC_DYNAMIC_MINOR;
-
 struct vhci_data {
 	struct hci_dev *hdev;
 
@@ -218,12 +216,6 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
 	return POLLOUT | POLLWRNORM;
 }
 
-static int vhci_ioctl(struct inode *inode, struct file *file,
-					unsigned int cmd, unsigned long arg)
-{
-	return -EINVAL;
-}
-
 static int vhci_open(struct inode *inode, struct file *file)
 {
 	struct vhci_data *data;
@@ -284,10 +276,10 @@ static int vhci_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations vhci_fops = {
+	.owner		= THIS_MODULE,
 	.read		= vhci_read,
 	.write		= vhci_write,
 	.poll		= vhci_poll,
-	.ioctl		= vhci_ioctl,
 	.open		= vhci_open,
 	.release	= vhci_release,
 };
@@ -302,18 +294,12 @@ static int __init vhci_init(void)
 {
 	BT_INFO("Virtual HCI driver ver %s", VERSION);
 
-	if (misc_register(&vhci_miscdev) < 0) {
-		BT_ERR("Can't register misc device with minor %d", minor);
-		return -EIO;
-	}
-
-	return 0;
+	return misc_register(&vhci_miscdev);
 }
 
 static void __exit vhci_exit(void)
 {
-	if (misc_deregister(&vhci_miscdev) < 0)
-		BT_ERR("Can't unregister misc device with minor %d", minor);
+	misc_deregister(&vhci_miscdev);
 }
 
 module_init(vhci_init);
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index 7c544f7c74c4..d9d0e13efe47 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -32,9 +32,29 @@
 #include <net/nl802154.h>
 #include <net/wpan-phy.h>
 
-struct wpan_phy *net_to_phy(struct net_device *dev)
+struct fakehard_priv {
+	struct wpan_phy *phy;
+};
+
+static struct wpan_phy *fake_to_phy(const struct net_device *dev)
 {
-	return container_of(dev->dev.parent, struct wpan_phy, dev);
+	struct fakehard_priv *priv = netdev_priv(dev);
+	return priv->phy;
+}
+
+/**
+ * fake_get_phy - Return a phy corresponding to this device.
+ * @dev: The network device for which to return the wan-phy object
+ *
+ * This function returns a wpan-phy object corresponding to the passed
+ * network device. Reference counter for wpan-phy object is incremented,
+ * so when the wpan-phy isn't necessary, you should drop the reference
+ * via @wpan_phy_put() call.
+ */
+static struct wpan_phy *fake_get_phy(const struct net_device *dev)
+{
+	struct wpan_phy *phy = fake_to_phy(dev);
+	return to_phy(get_device(&phy->dev));
 }
 
 /**
@@ -43,7 +63,7 @@ struct wpan_phy *net_to_phy(struct net_device *dev)
  *
  * Return the ID of the PAN from the PIB.
  */
-static u16 fake_get_pan_id(struct net_device *dev)
+static u16 fake_get_pan_id(const struct net_device *dev)
 {
 	BUG_ON(dev->type != ARPHRD_IEEE802154);
 
@@ -58,7 +78,7 @@ static u16 fake_get_pan_id(struct net_device *dev)
  * device. If the device has not yet had a short address assigned
  * then this should return 0xFFFF to indicate a lack of association.
  */
-static u16 fake_get_short_addr(struct net_device *dev)
+static u16 fake_get_short_addr(const struct net_device *dev)
 {
 	BUG_ON(dev->type != ARPHRD_IEEE802154);
 
@@ -78,7 +98,7 @@ static u16 fake_get_short_addr(struct net_device *dev)
  * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
  *       document.
  */
-static u8 fake_get_dsn(struct net_device *dev)
+static u8 fake_get_dsn(const struct net_device *dev)
 {
 	BUG_ON(dev->type != ARPHRD_IEEE802154);
 
@@ -98,7 +118,7 @@ static u8 fake_get_dsn(struct net_device *dev)
  * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
  *       document.
  */
-static u8 fake_get_bsn(struct net_device *dev)
+static u8 fake_get_bsn(const struct net_device *dev)
 {
 	BUG_ON(dev->type != ARPHRD_IEEE802154);
 
@@ -121,7 +141,7 @@ static u8 fake_get_bsn(struct net_device *dev)
 static int fake_assoc_req(struct net_device *dev,
 		struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
 {
-	struct wpan_phy *phy = net_to_phy(dev);
+	struct wpan_phy *phy = fake_to_phy(dev);
 
 	mutex_lock(&phy->pib_lock);
 	phy->current_channel = channel;
@@ -196,7 +216,7 @@ static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
 				u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
 				u8 coord_realign)
 {
-	struct wpan_phy *phy = net_to_phy(dev);
+	struct wpan_phy *phy = fake_to_phy(dev);
 
 	mutex_lock(&phy->pib_lock);
 	phy->current_channel = channel;
@@ -239,6 +259,8 @@ static struct ieee802154_mlme_ops fake_mlme = {
 	.start_req = fake_start_req,
 	.scan_req = fake_scan_req,
 
+	.get_phy = fake_get_phy,
+
 	.get_pan_id = fake_get_pan_id,
 	.get_short_addr = fake_get_short_addr,
 	.get_dsn = fake_get_dsn,
@@ -310,7 +332,7 @@ static const struct net_device_ops fake_ops = {
 
 static void ieee802154_fake_destruct(struct net_device *dev)
 {
-	struct wpan_phy *phy = net_to_phy(dev);
+	struct wpan_phy *phy = fake_to_phy(dev);
 
 	wpan_phy_unregister(phy);
 	free_netdev(dev);
@@ -335,13 +357,14 @@ static void ieee802154_fake_setup(struct net_device *dev)
 static int __devinit ieee802154fake_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
+	struct fakehard_priv *priv;
 	struct wpan_phy *phy = wpan_phy_alloc(0);
 	int err;
 
 	if (!phy)
 		return -ENOMEM;
 
-	dev = alloc_netdev(0, "hardwpan%d", ieee802154_fake_setup);
+	dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
 	if (!dev) {
 		wpan_phy_free(phy);
 		return -ENOMEM;
@@ -353,12 +376,23 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
 			dev->addr_len);
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-	phy->channels_supported = (1 << 27) - 1;
+	/*
+	 * For now we'd like to emulate 2.4 GHz-only device,
+	 * both O-QPSK and CSS
+	 */
+	/* 2.4 GHz O-QPSK 802.15.4-2003 */
+	phy->channels_supported[0] |= 0x7FFF800;
+	/* 2.4 GHz CSS 802.15.4a-2007 */
+	phy->channels_supported[3] |= 0x3fff;
+
 	phy->transmit_power = 0xbf;
 
 	dev->netdev_ops = &fake_ops;
 	dev->ml_priv = &fake_mlme;
 
+	priv = netdev_priv(dev);
+	priv->phy = phy;
+
 	/*
 	 * If the name is a format string the caller wants us to do a
 	 * name allocation.
@@ -369,11 +403,12 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
 			goto out;
 	}
 
+	wpan_phy_set_dev(phy, &pdev->dev);
 	SET_NETDEV_DEV(dev, &phy->dev);
 
 	platform_set_drvdata(pdev, dev);
 
-	err = wpan_phy_register(&pdev->dev, phy);
+	err = wpan_phy_register(phy);
 	if (err)
 		goto out;
 
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index e593af3354b8..de18fdfdadf2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1080,11 +1080,14 @@ static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
 
 
 /**
- * nes_netdev_get_stats_count
+ * nes_netdev_get_sset_count
  */
-static int nes_netdev_get_stats_count(struct net_device *netdev)
+static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
 {
-	return NES_ETHTOOL_STAT_COUNT;
+	if (stringset == ETH_SS_STATS)
+		return NES_ETHTOOL_STAT_COUNT;
+	else
+		return -EINVAL;
 }
 
 
@@ -1264,7 +1267,6 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
 	sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
 				nesadapter->firmware_version & 0x000000ff);
 	strcpy(drvinfo->version, DRV_VERSION);
-	drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
 	drvinfo->testinfo_len = 0;
 	drvinfo->eedump_len = 0;
 	drvinfo->regdump_len = 0;
@@ -1516,7 +1518,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
 	.get_rx_csum = nes_netdev_get_rx_csum,
 	.get_sg = ethtool_op_get_sg,
 	.get_strings = nes_netdev_get_strings,
-	.get_stats_count = nes_netdev_get_stats_count,
+	.get_sset_count = nes_netdev_get_sset_count,
 	.get_ethtool_stats = nes_netdev_get_ethtool_stats,
 	.get_drvinfo = nes_netdev_get_drvinfo,
 	.get_coalesce = nes_netdev_get_coalesce,
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index 18ab8652aa57..dcefedc7044a 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,6 +1,5 @@
 menuconfig ISDN_DRV_GIGASET
 	tristate "Siemens Gigaset support"
-	depends on ISDN_I4L
 	select CRC_CCITT
 	select BITREVERSE
 	help
@@ -11,9 +10,33 @@ menuconfig ISDN_DRV_GIGASET
 	  If you have one of these devices, say M here and for at least
 	  one of the connection specific parts that follow.
 	  This will build a module called "gigaset".
+	  Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
+	  as a module, you have to build this driver as a module too,
+	  otherwise the Gigaset device won't show up as an ISDN device.
 
 if ISDN_DRV_GIGASET
 
+config GIGASET_CAPI
+	bool "Gigaset CAPI support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
+	default ISDN_I4L='n'
+	help
+	  Build the Gigaset driver as a CAPI 2.0 driver interfacing with
+	  the Kernel CAPI subsystem. To use it with the old ISDN4Linux
+	  subsystem you'll have to enable the capidrv glue driver.
+	  (select ISDN_CAPI_CAPIDRV.)
+	  Say N to build the old native ISDN4Linux variant.
+
+config GIGASET_I4L
+	bool
+	depends on ISDN_I4L='y'||(ISDN_I4L='m'&&ISDN_DRV_GIGASET='m')
+	default !GIGASET_CAPI
+
+config GIGASET_DUMMYLL
+	bool
+	default !GIGASET_CAPI&&!GIGASET_I4L
+
 config GIGASET_BASE
 	tristate "Gigaset base station support"
 	depends on USB
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
index e9d3189f56b7..c453b72272a0 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/isdn/gigaset/Makefile
@@ -1,4 +1,7 @@
-gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o
+gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
+gigaset-$(CONFIG_GIGASET_CAPI) += capi.o
+gigaset-$(CONFIG_GIGASET_I4L) += i4l.o
+gigaset-$(CONFIG_GIGASET_DUMMYLL) += dummyll.o
 usb_gigaset-y := usb-gigaset.o
 ser_gigaset-y := ser-gigaset.o
 bas_gigaset-y := bas-gigaset.o isocdata.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 44a58e6f8f65..ccb2a7b7c41d 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -19,7 +19,7 @@
 
 /* check if byte must be stuffed/escaped
  * I'm not sure which data should be encoded.
- * Therefore I will go the hard way and decode every value
+ * Therefore I will go the hard way and encode every value
  * less than 0x20, the flag sequence and the control escape char.
  */
 static inline int muststuff(unsigned char c)
@@ -35,303 +35,383 @@ static inline int muststuff(unsigned char c)
 
 /* == data input =========================================================== */
 
-/* process a block of received bytes in command mode (modem response)
+/* process a block of received bytes in command mode
+ * (mstate != MS_LOCKED && (inputstate & INS_command))
+ * Append received bytes to the command response buffer and forward them
+ * line by line to the response handler. Exit whenever a mode/state change
+ * might have occurred.
  * Return value:
  *	number of processed bytes
  */
-static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes,
-			   struct inbuf_t *inbuf)
+static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
+	unsigned char *src = inbuf->data + inbuf->head;
 	struct cardstate *cs = inbuf->cs;
-	unsigned cbytes      = cs->cbytes;
-	int inputstate = inbuf->inputstate;
-	int startbytes = numbytes;
-
-	for (;;) {
-		cs->respdata[cbytes] = c;
-		if (c == 10 || c == 13) {
-			gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
+	unsigned cbytes = cs->cbytes;
+	unsigned procbytes = 0;
+	unsigned char c;
+
+	while (procbytes < numbytes) {
+		c = *src++;
+		procbytes++;
+
+		switch (c) {
+		case '\n':
+			if (cbytes == 0 && cs->respdata[0] == '\r') {
+				/* collapse LF with preceding CR */
+				cs->respdata[0] = 0;
+				break;
+			}
+			/* --v-- fall through --v-- */
+		case '\r':
+			/* end of message line, pass to response handler */
+			gig_dbg(DEBUG_TRANSCMD, "%s: End of Message (%d Bytes)",
 				__func__, cbytes);
+			if (cbytes >= MAX_RESP_SIZE) {
+				dev_warn(cs->dev, "response too large (%d)\n",
+					 cbytes);
+				cbytes = MAX_RESP_SIZE;
+			}
 			cs->cbytes = cbytes;
-			gigaset_handle_modem_response(cs); /* can change
-							      cs->dle */
+			gigaset_handle_modem_response(cs);
 			cbytes = 0;
 
-			if (cs->dle &&
-			    !(inputstate & INS_DLE_command)) {
-				inputstate &= ~INS_command;
-				break;
-			}
-		} else {
-			/* advance in line buffer, checking for overflow */
-			if (cbytes < MAX_RESP_SIZE - 1)
-				cbytes++;
-			else
-				dev_warn(cs->dev, "response too large\n");
-		}
+			/* store EOL byte for CRLF collapsing */
+			cs->respdata[0] = c;
 
-		if (!numbytes)
-			break;
-		c = *src++;
-		--numbytes;
-		if (c == DLE_FLAG &&
-		    (cs->dle || inputstate & INS_DLE_command)) {
-			inputstate |= INS_DLE_char;
-			break;
+			/* cs->dle may have changed */
+			if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
+				inbuf->inputstate &= ~INS_command;
+
+			/* return for reevaluating state */
+			goto exit;
+
+		case DLE_FLAG:
+			if (inbuf->inputstate & INS_DLE_char) {
+				/* quoted DLE: clear quote flag */
+				inbuf->inputstate &= ~INS_DLE_char;
+			} else if (cs->dle ||
+				   (inbuf->inputstate & INS_DLE_command)) {
+				/* DLE escape, pass up for handling */
+				inbuf->inputstate |= INS_DLE_char;
+				goto exit;
+			}
+			/* quoted or not in DLE mode: treat as regular data */
+			/* --v-- fall through --v-- */
+		default:
+			/* append to line buffer if possible */
+			if (cbytes < MAX_RESP_SIZE)
+				cs->respdata[cbytes] = c;
+			cbytes++;
 		}
 	}
-
+exit:
 	cs->cbytes = cbytes;
-	inbuf->inputstate = inputstate;
-
-	return startbytes - numbytes;
+	return procbytes;
 }
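
For illustration, the loop above collapses a CR/LF pair into a single end-of-message event by remembering the last EOL byte in cs->respdata[0]. The following minimal userspace sketch shows the same collapsing idea with an explicit variable; the buffer size, names and sample input are illustrative only, not the driver's.

#include <stdio.h>
#include <string.h>

#define LINEBUF 64

static void feed(const unsigned char *data, size_t len)
{
	static char line[LINEBUF];
	static size_t fill;
	static unsigned char last_eol;

	for (size_t i = 0; i < len; i++) {
		unsigned char c = data[i];

		if (c == '\n' && fill == 0 && last_eol == '\r') {
			last_eol = 0;	/* swallow LF following CR */
			continue;
		}
		if (c == '\r' || c == '\n') {
			line[fill < LINEBUF - 1 ? fill : LINEBUF - 1] = '\0';
			printf("line: \"%s\"\n", line);
			fill = 0;
			last_eol = c;	/* remember EOL byte for collapsing */
			continue;
		}
		if (fill < LINEBUF - 1)
			line[fill] = c;	/* append, dropping overflow */
		fill++;
		last_eol = 0;
	}
}

int main(void)
{
	const char *sample = "OK\r\nRING\r";

	feed((const unsigned char *)sample, strlen(sample));
	return 0;
}

Compiled standalone, this prints one line per CR/LF-terminated response ("OK", "RING"), with the LF after the first CR swallowed.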
 
-/* process a block of received bytes in lock mode (tty i/f)
+/* process a block of received bytes in lock mode
+ * All received bytes are passed unmodified to the tty i/f.
  * Return value:
  *	number of processed bytes
  */
-static inline int lock_loop(unsigned char *src, int numbytes,
-			    struct inbuf_t *inbuf)
+static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
-	struct cardstate *cs = inbuf->cs;
-
-	gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
-			   numbytes, src);
-	gigaset_if_receive(cs, src, numbytes);
+	unsigned char *src = inbuf->data + inbuf->head;
 
+	gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
+	gigaset_if_receive(inbuf->cs, src, numbytes);
 	return numbytes;
 }
 
+/* set up next receive skb for data mode
+ */
+static void new_rcv_skb(struct bc_state *bcs)
+{
+	struct cardstate *cs = bcs->cs;
+	unsigned short hw_hdr_len = cs->hw_hdr_len;
+
+	if (bcs->ignore) {
+		bcs->skb = NULL;
+		return;
+	}
+
+	bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
+	if (bcs->skb == NULL) {
+		dev_warn(cs->dev, "could not allocate new skb\n");
+		return;
+	}
+	skb_reserve(bcs->skb, hw_hdr_len);
+}
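
The point of reserving hw_hdr_len bytes here is that the LL can later prepend its DATA_B3_IND header with a simple push instead of copying the payload. A simplified sketch of that headroom pattern, using a flat buffer instead of a struct sk_buff (HDRLEN, PAYLOAD and the sskb_* names are illustrative):

#include <stdio.h>
#include <string.h>

#define HDRLEN  24			/* illustrative header size */
#define PAYLOAD 64			/* illustrative payload limit */

struct simple_skb {
	unsigned char buf[HDRLEN + PAYLOAD];
	unsigned char *data;		/* start of valid data */
	unsigned int len;
};

static void sskb_init(struct simple_skb *skb)
{
	skb->data = skb->buf + HDRLEN;	/* like skb_reserve(skb, hw_hdr_len) */
	skb->len = 0;
}

static void sskb_put_byte(struct simple_skb *skb, unsigned char c)
{
	skb->data[skb->len++] = c;	/* append received payload byte */
}

static void sskb_push_hdr(struct simple_skb *skb, const void *hdr,
			  unsigned int n)
{
	skb->data -= n;			/* grow into the reserved headroom */
	skb->len += n;
	memcpy(skb->data, hdr, n);	/* header lands in front of payload */
}

int main(void)
{
	struct simple_skb skb;
	unsigned char hdr[HDRLEN] = { 0x12 };	/* placeholder header */

	sskb_init(&skb);
	sskb_put_byte(&skb, 0xAA);		/* payload arrives first */
	sskb_push_hdr(&skb, hdr, sizeof(hdr));	/* header prepended later */
	printf("len=%u first=0x%02x\n", skb.len, skb.data[0]);
	return 0;
}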
+
 /* process a block of received bytes in HDLC data mode
+ * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
  * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
  * When a frame is complete, check the FCS and pass valid frames to the LL.
  * If DLE is encountered, return immediately to let the caller handle it.
  * Return value:
  *	number of processed bytes
- *	numbytes (all bytes processed) on error --FIXME
  */
-static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes,
-			    struct inbuf_t *inbuf)
+static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
 	struct cardstate *cs = inbuf->cs;
-	struct bc_state *bcs = inbuf->bcs;
+	struct bc_state *bcs = cs->bcs;
 	int inputstate = bcs->inputstate;
 	__u16 fcs = bcs->fcs;
 	struct sk_buff *skb = bcs->skb;
-	unsigned char error;
-	struct sk_buff *compskb;
-	int startbytes = numbytes;
-	int l;
+	unsigned char *src = inbuf->data + inbuf->head;
+	unsigned procbytes = 0;
+	unsigned char c;
 
-	if (unlikely(inputstate & INS_byte_stuff)) {
+	if (inputstate & INS_byte_stuff) {
+		if (!numbytes)
+			return 0;
 		inputstate &= ~INS_byte_stuff;
 		goto byte_stuff;
 	}
-	for (;;) {
-		if (unlikely(c == PPP_ESCAPE)) {
-			if (unlikely(!numbytes)) {
-				inputstate |= INS_byte_stuff;
+
+	while (procbytes < numbytes) {
+		c = *src++;
+		procbytes++;
+		if (c == DLE_FLAG) {
+			if (inputstate & INS_DLE_char) {
+				/* quoted DLE: clear quote flag */
+				inputstate &= ~INS_DLE_char;
+			} else if (cs->dle || (inputstate & INS_DLE_command)) {
+				/* DLE escape, pass up for handling */
+				inputstate |= INS_DLE_char;
 				break;
 			}
-			c = *src++;
-			--numbytes;
-			if (unlikely(c == DLE_FLAG &&
-				     (cs->dle ||
-				      inbuf->inputstate & INS_DLE_command))) {
-				inbuf->inputstate |= INS_DLE_char;
+		}
+
+		if (c == PPP_ESCAPE) {
+			/* byte stuffing indicator: pull in next byte */
+			if (procbytes >= numbytes) {
+				/* end of buffer, save for later processing */
 				inputstate |= INS_byte_stuff;
 				break;
 			}
 byte_stuff:
+			c = *src++;
+			procbytes++;
+			if (c == DLE_FLAG) {
+				if (inputstate & INS_DLE_char) {
+					/* quoted DLE: clear quote flag */
+					inputstate &= ~INS_DLE_char;
+				} else if (cs->dle ||
+					   (inputstate & INS_DLE_command)) {
+					/* DLE escape, pass up for handling */
+					inputstate |=
+						INS_DLE_char | INS_byte_stuff;
+					break;
+				}
+			}
 			c ^= PPP_TRANS;
-			if (unlikely(!muststuff(c)))
-				gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
-		} else if (unlikely(c == PPP_FLAG)) {
-			if (unlikely(inputstate & INS_skip_frame)) {
-#ifdef CONFIG_GIGASET_DEBUG
-				if (!(inputstate & INS_have_data)) { /* 7E 7E */
-					++bcs->emptycount;
-				} else
-					gig_dbg(DEBUG_HDLC,
-					    "7e----------------------------");
-#endif
-
-				/* end of frame */
-				error = 1;
-				gigaset_rcv_error(NULL, cs, bcs);
-			} else if (!(inputstate & INS_have_data)) { /* 7E 7E */
 #ifdef CONFIG_GIGASET_DEBUG
-				++bcs->emptycount;
+			if (!muststuff(c))
+				gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
 #endif
-				break;
-			} else {
+		} else if (c == PPP_FLAG) {
+			/* end of frame: process content if any */
+			if (inputstate & INS_have_data) {
 				gig_dbg(DEBUG_HDLC,
 					"7e----------------------------");
 
-				/* end of frame */
-				error = 0;
-
-				if (unlikely(fcs != PPP_GOODFCS)) {
+				/* check and pass received frame */
+				if (!skb) {
+					/* skipped frame */
+					gigaset_isdn_rcv_err(bcs);
+				} else if (skb->len < 2) {
+					/* frame too short for FCS */
+					dev_warn(cs->dev,
+						 "short frame (%d)\n",
+						 skb->len);
+					gigaset_isdn_rcv_err(bcs);
+					dev_kfree_skb_any(skb);
+				} else if (fcs != PPP_GOODFCS) {
+					/* frame check error */
 					dev_err(cs->dev,
 				"Checksum failed, %u bytes corrupted!\n",
 						skb->len);
-					compskb = NULL;
-					gigaset_rcv_error(compskb, cs, bcs);
-					error = 1;
+					gigaset_isdn_rcv_err(bcs);
+					dev_kfree_skb_any(skb);
 				} else {
-					if (likely((l = skb->len) > 2)) {
-						skb->tail -= 2;
-						skb->len -= 2;
-					} else {
-						dev_kfree_skb(skb);
-						skb = NULL;
-						inputstate |= INS_skip_frame;
-						if (l == 1) {
-							dev_err(cs->dev,
-						  "invalid packet size (1)!\n");
-							error = 1;
-							gigaset_rcv_error(NULL,
-								cs, bcs);
-						}
-					}
-					if (likely(!(error ||
-						     (inputstate &
-						      INS_skip_frame)))) {
-						gigaset_rcv_skb(skb, cs, bcs);
-					}
+					/* good frame */
+					__skb_trim(skb, skb->len - 2);
+					gigaset_skb_rcvd(bcs, skb);
 				}
-			}
 
-			if (unlikely(error))
-				if (skb)
-					dev_kfree_skb(skb);
-
-			fcs = PPP_INITFCS;
-			inputstate &= ~(INS_have_data | INS_skip_frame);
-			if (unlikely(bcs->ignore)) {
-				inputstate |= INS_skip_frame;
-				skb = NULL;
-			} else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
-				skb_reserve(skb, HW_HDR_LEN);
+				/* prepare reception of next frame */
+				inputstate &= ~INS_have_data;
+				new_rcv_skb(bcs);
+				skb = bcs->skb;
 			} else {
-				dev_warn(cs->dev,
-					 "could not allocate new skb\n");
-				inputstate |= INS_skip_frame;
+				/* empty frame (7E 7E) */
+#ifdef CONFIG_GIGASET_DEBUG
+				++bcs->emptycount;
+#endif
+				if (!skb) {
+					/* skipped (?) */
+					gigaset_isdn_rcv_err(bcs);
+					new_rcv_skb(bcs);
+					skb = bcs->skb;
+				}
 			}
 
-			break;
-		} else if (unlikely(muststuff(c))) {
+			fcs = PPP_INITFCS;
+			continue;
+#ifdef CONFIG_GIGASET_DEBUG
+		} else if (muststuff(c)) {
 			/* Should not happen. Possible after ZDLE=1<CR><LF>. */
 			gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
+#endif
 		}
 
-		/* add character */
-
+		/* regular data byte, append to skb */
 #ifdef CONFIG_GIGASET_DEBUG
-		if (unlikely(!(inputstate & INS_have_data))) {
+		if (!(inputstate & INS_have_data)) {
 			gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
 				bcs->emptycount);
 			bcs->emptycount = 0;
 		}
 #endif
-
 		inputstate |= INS_have_data;
-
-		if (likely(!(inputstate & INS_skip_frame))) {
-			if (unlikely(skb->len == SBUFSIZE)) {
+		if (skb) {
+			if (skb->len == SBUFSIZE) {
 				dev_warn(cs->dev, "received packet too long\n");
 				dev_kfree_skb_any(skb);
-				skb = NULL;
-				inputstate |= INS_skip_frame;
-				break;
+				/* skip remainder of packet */
+				bcs->skb = skb = NULL;
+			} else {
+				*__skb_put(skb, 1) = c;
+				fcs = crc_ccitt_byte(fcs, c);
 			}
-			*__skb_put(skb, 1) = c;
-			fcs = crc_ccitt_byte(fcs, c);
-		}
-
-		if (unlikely(!numbytes))
-			break;
-		c = *src++;
-		--numbytes;
-		if (unlikely(c == DLE_FLAG &&
-			     (cs->dle ||
-			      inbuf->inputstate & INS_DLE_command))) {
-			inbuf->inputstate |= INS_DLE_char;
-			break;
 		}
 	}
+
 	bcs->inputstate = inputstate;
 	bcs->fcs = fcs;
-	bcs->skb = skb;
-	return startbytes - numbytes;
+	return procbytes;
 }
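
The FCS test above relies on the usual HDLC/PPP property (RFC 1662): the sender appends the ones' complement of the CRC, least significant byte first, and the receiver gets the fixed residue 0xf0b8 when running the same CRC over payload plus trailer. A self-contained userspace demonstration; fcs_byte() is a local bitwise stand-in for the kernel's crc_ccitt_byte():

#include <stdint.h>
#include <stdio.h>

#define PPP_INITFCS 0xffff		/* initial FCS value */
#define PPP_GOODFCS 0xf0b8		/* FCS residue of a good frame */

/* bitwise equivalent of crc_ccitt_byte() (reflected poly 0x8408) */
static uint16_t fcs_byte(uint16_t crc, uint8_t c)
{
	crc ^= c;
	for (int i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}

int main(void)
{
	uint8_t payload[] = { 0x01, 0x02, 0x03 };
	uint16_t fcs = PPP_INITFCS;
	uint16_t comp;
	size_t i;

	/* sender: accumulate the FCS over the payload ... */
	for (i = 0; i < sizeof(payload); i++)
		fcs = fcs_byte(fcs, payload[i]);
	/* ... and append its ones' complement, least significant byte first */
	comp = ~fcs;
	uint8_t trailer[2] = { comp & 0xff, comp >> 8 };

	/* receiver: run the same CRC over payload plus trailer */
	fcs = PPP_INITFCS;
	for (i = 0; i < sizeof(payload); i++)
		fcs = fcs_byte(fcs, payload[i]);
	for (i = 0; i < 2; i++)
		fcs = fcs_byte(fcs, trailer[i]);

	printf("%s\n", fcs == PPP_GOODFCS ? "frame ok" : "checksum failed");
	return 0;
}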
 
 /* process a block of received bytes in transparent data mode
+ * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
  * Invert bytes, undoing byte stuffing and watching for DLE escapes.
  * If DLE is encountered, return immediately to let the caller handle it.
  * Return value:
  *	number of processed bytes
- *	numbytes (all bytes processed) on error --FIXME
  */
-static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
-			    struct inbuf_t *inbuf)
+static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
 	struct cardstate *cs = inbuf->cs;
-	struct bc_state *bcs = inbuf->bcs;
+	struct bc_state *bcs = cs->bcs;
 	int inputstate = bcs->inputstate;
 	struct sk_buff *skb = bcs->skb;
-	int startbytes = numbytes;
+	unsigned char *src = inbuf->data + inbuf->head;
+	unsigned procbytes = 0;
+	unsigned char c;
 
-	for (;;) {
-		/* add character */
-		inputstate |= INS_have_data;
+	if (!skb) {
+		/* skip this block */
+		new_rcv_skb(bcs);
+		return numbytes;
+	}
 
-		if (likely(!(inputstate & INS_skip_frame))) {
-			if (unlikely(skb->len == SBUFSIZE)) {
-				//FIXME just pass skb up and allocate a new one
-				dev_warn(cs->dev, "received packet too long\n");
-				dev_kfree_skb_any(skb);
-				skb = NULL;
-				inputstate |= INS_skip_frame;
+	while (procbytes < numbytes && skb->len < SBUFSIZE) {
+		c = *src++;
+		procbytes++;
+
+		if (c == DLE_FLAG) {
+			if (inputstate & INS_DLE_char) {
+				/* quoted DLE: clear quote flag */
+				inputstate &= ~INS_DLE_char;
+			} else if (cs->dle || (inputstate & INS_DLE_command)) {
+				/* DLE escape, pass up for handling */
+				inputstate |= INS_DLE_char;
 				break;
 			}
-			*__skb_put(skb, 1) = bitrev8(c);
 		}
 
-		if (unlikely(!numbytes))
-			break;
-		c = *src++;
-		--numbytes;
-		if (unlikely(c == DLE_FLAG &&
-			     (cs->dle ||
-			      inbuf->inputstate & INS_DLE_command))) {
-			inbuf->inputstate |= INS_DLE_char;
-			break;
-		}
+		/* regular data byte: append to current skb */
+		inputstate |= INS_have_data;
+		*__skb_put(skb, 1) = bitrev8(c);
 	}
 
 	/* pass data up */
-	if (likely(inputstate & INS_have_data)) {
-		if (likely(!(inputstate & INS_skip_frame))) {
-			gigaset_rcv_skb(skb, cs, bcs);
-		}
-		inputstate &= ~(INS_have_data | INS_skip_frame);
-		if (unlikely(bcs->ignore)) {
-			inputstate |= INS_skip_frame;
-			skb = NULL;
-		} else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN))
-				  != NULL)) {
-			skb_reserve(skb, HW_HDR_LEN);
+	if (inputstate & INS_have_data) {
+		gigaset_skb_rcvd(bcs, skb);
+		inputstate &= ~INS_have_data;
+		new_rcv_skb(bcs);
+	}
+
+	bcs->inputstate = inputstate;
+	return procbytes;
+}
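
Transparent mode stores each byte bit-reversed via bitrev8(). A standalone equivalent of that reversal, for reference (bitrev8_sketch is a local name):

#include <stdint.h>

/* reverse the bit order of one byte, e.g. 0x01 -> 0x80 */
uint8_t bitrev8_sketch(uint8_t b)
{
	b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;	/* swap nibbles */
	b = (b & 0xcc) >> 2 | (b & 0x33) << 2;	/* swap bit pairs */
	b = (b & 0xaa) >> 1 | (b & 0x55) << 1;	/* swap adjacent bits */
	return b;
}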
+
+/* process DLE escapes
+ * Called whenever a DLE sequence might be encountered in the input stream.
+ * Either processes the entire DLE sequence or, if that isn't possible,
+ * notes the fact that an initial DLE has been received in the INS_DLE_char
+ * inputstate flag and resumes processing of the sequence on the next call.
+ */
+static void handle_dle(struct inbuf_t *inbuf)
+{
+	struct cardstate *cs = inbuf->cs;
+
+	if (cs->mstate == MS_LOCKED)
+		return;		/* no DLE processing in lock mode */
+
+	if (!(inbuf->inputstate & INS_DLE_char)) {
+		/* no DLE pending */
+		if (inbuf->data[inbuf->head] == DLE_FLAG &&
+		    (cs->dle || inbuf->inputstate & INS_DLE_command)) {
+			/* start of DLE sequence */
+			inbuf->head++;
+			if (inbuf->head == inbuf->tail ||
+			    inbuf->head == RBUFSIZE) {
+				/* end of buffer, save for later processing */
+				inbuf->inputstate |= INS_DLE_char;
+				return;
+			}
 		} else {
-			dev_warn(cs->dev, "could not allocate new skb\n");
-			inputstate |= INS_skip_frame;
+			/* regular data byte */
+			return;
 		}
 	}
 
-	bcs->inputstate = inputstate;
-	bcs->skb = skb;
-	return startbytes - numbytes;
+	/* consume pending DLE */
+	inbuf->inputstate &= ~INS_DLE_char;
+
+	switch (inbuf->data[inbuf->head]) {
+	case 'X':	/* begin of event message */
+		if (inbuf->inputstate & INS_command)
+			dev_notice(cs->dev,
+				   "received <DLE>X in command mode\n");
+		inbuf->inputstate |= INS_command | INS_DLE_command;
+		inbuf->head++;	/* byte consumed */
+		break;
+	case '.':	/* end of event message */
+		if (!(inbuf->inputstate & INS_DLE_command))
+			dev_notice(cs->dev,
+				   "received <DLE>. without <DLE>X\n");
+		inbuf->inputstate &= ~INS_DLE_command;
+		/* return to data mode if in DLE mode */
+		if (cs->dle)
+			inbuf->inputstate &= ~INS_command;
+		inbuf->head++;	/* byte consumed */
+		break;
+	case DLE_FLAG:	/* DLE in data stream */
+		/* mark as quoted */
+		inbuf->inputstate |= INS_DLE_char;
+		if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
+			dev_notice(cs->dev,
+				   "received <DLE><DLE> not in DLE mode\n");
+		break;	/* quoted byte left in buffer */
+	default:
+		dev_notice(cs->dev, "received <DLE><%02x>\n",
+			   inbuf->data[inbuf->head]);
+		/* quoted byte left in buffer */
+	}
 }
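
A compact model of the <DLE> framing handled above: in DLE mode the device wraps unsolicited event messages in <DLE>X ... <DLE>. and doubles any literal DLE byte in the data stream. The sketch below only classifies the byte after a DLE; names and the sample input are illustrative.

#include <stdio.h>

#define DLE 0x10			/* ASCII DLE */

/* classify each position of a byte stream assumed to be in DLE mode */
static void classify(const unsigned char *s, unsigned int len)
{
	for (unsigned int i = 0; i < len; i++) {
		if (s[i] != DLE) {
			printf("data byte 0x%02x\n", s[i]);
			continue;
		}
		if (++i >= len)
			break;		/* DLE pending, wait for more input */
		switch (s[i]) {
		case 'X':
			printf("start of event message\n");
			break;
		case '.':
			printf("end of event message\n");
			break;
		case DLE:
			printf("data byte 0x10 (quoted DLE)\n");
			break;
		default:
			printf("unexpected <DLE><%02x>\n", s[i]);
		}
	}
}

int main(void)
{
	const unsigned char sample[] = {
		'a', DLE, 'X', 'R', DLE, '.', DLE, DLE
	};

	classify(sample, sizeof(sample));
	return 0;
}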
 
 /**
@@ -345,94 +425,39 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
  */
 void gigaset_m10x_input(struct inbuf_t *inbuf)
 {
-	struct cardstate *cs;
-	unsigned tail, head, numbytes;
-	unsigned char *src, c;
-	int procbytes;
-
-	head = inbuf->head;
-	tail = inbuf->tail;
-	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
-
-	if (head != tail) {
-		cs = inbuf->cs;
-		src = inbuf->data + head;
-		numbytes = (head > tail ? RBUFSIZE : tail) - head;
-		gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+	struct cardstate *cs = inbuf->cs;
+	unsigned numbytes, procbytes;
 
-		while (numbytes) {
-			if (cs->mstate == MS_LOCKED) {
-				procbytes = lock_loop(src, numbytes, inbuf);
-				src += procbytes;
-				numbytes -= procbytes;
-			} else {
-				c = *src++;
-				--numbytes;
-				if (c == DLE_FLAG && (cs->dle ||
-				    inbuf->inputstate & INS_DLE_command)) {
-					if (!(inbuf->inputstate & INS_DLE_char)) {
-						inbuf->inputstate |= INS_DLE_char;
-						goto nextbyte;
-					}
-					/* <DLE> <DLE> => <DLE> in data stream */
-					inbuf->inputstate &= ~INS_DLE_char;
-				}
+	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
 
-				if (!(inbuf->inputstate & INS_DLE_char)) {
-
-					/* FIXME use function pointers?  */
-					if (inbuf->inputstate & INS_command)
-						procbytes = cmd_loop(c, src, numbytes, inbuf);
-					else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
-						procbytes = hdlc_loop(c, src, numbytes, inbuf);
-					else
-						procbytes = iraw_loop(c, src, numbytes, inbuf);
-
-					src += procbytes;
-					numbytes -= procbytes;
-				} else {  /* DLE char */
-					inbuf->inputstate &= ~INS_DLE_char;
-					switch (c) {
-					case 'X': /*begin of command*/
-						if (inbuf->inputstate & INS_command)
-							dev_warn(cs->dev,
-					"received <DLE> 'X' in command mode\n");
-						inbuf->inputstate |=
-							INS_command | INS_DLE_command;
-						break;
-					case '.': /*end of command*/
-						if (!(inbuf->inputstate & INS_command))
-							dev_warn(cs->dev,
-					"received <DLE> '.' in hdlc mode\n");
-						inbuf->inputstate &= cs->dle ?
-							~(INS_DLE_command|INS_command)
-							: ~INS_DLE_command;
-						break;
-					//case DLE_FLAG: /*DLE_FLAG in data stream*/ /* schon oben behandelt! */
-					default:
-						dev_err(cs->dev,
-						      "received 0x10 0x%02x!\n",
-							(int) c);
-						/* FIXME: reset driver?? */
-					}
-				}
-			}
-nextbyte:
-			if (!numbytes) {
-				/* end of buffer, check for wrap */
-				if (head > tail) {
-					head = 0;
-					src = inbuf->data;
-					numbytes = tail;
-				} else {
-					head = tail;
-					break;
-				}
-			}
-		}
+	while (inbuf->head != inbuf->tail) {
+		/* check for DLE escape */
+		handle_dle(inbuf);
 
-		gig_dbg(DEBUG_INTR, "setting head to %u", head);
-		inbuf->head = head;
+		/* process a contiguous block of bytes */
+		numbytes = (inbuf->head > inbuf->tail ?
+			    RBUFSIZE : inbuf->tail) - inbuf->head;
+		gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+		/*
+		 * numbytes may be 0 if handle_dle() ate the last byte.
+		 * This does no harm, *_loop() will just return 0 immediately.
+		 */
+
+		if (cs->mstate == MS_LOCKED)
+			procbytes = lock_loop(numbytes, inbuf);
+		else if (inbuf->inputstate & INS_command)
+			procbytes = cmd_loop(numbytes, inbuf);
+		else if (cs->bcs->proto2 == L2_HDLC)
+			procbytes = hdlc_loop(numbytes, inbuf);
+		else
+			procbytes = iraw_loop(numbytes, inbuf);
+		inbuf->head += procbytes;
+
+		/* check for buffer wraparound */
+		if (inbuf->head >= RBUFSIZE)
+			inbuf->head = 0;
+
+		gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
 	}
 }
 EXPORT_SYMBOL_GPL(gigaset_m10x_input);
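
The restructured input function processes the ring buffer in contiguous chunks: the largest block that can be handled without wrapping is (head > tail ? RBUFSIZE : tail) - head. A minimal standalone illustration of that computation (the ring size and indices are made up for the example):

#include <stdio.h>

#define RBUFSIZE 8			/* illustrative ring size */

int main(void)
{
	unsigned int tail = 3;		/* writer has filled slots 6,7,0,1,2 */
	unsigned int head = 6;		/* reader position */

	while (head != tail) {
		/* largest contiguous chunk that does not wrap */
		unsigned int numbytes = (head > tail ? RBUFSIZE : tail) - head;

		printf("process %u bytes starting at %u\n", numbytes, head);
		head += numbytes;
		if (head >= RBUFSIZE)
			head = 0;	/* wrap around */
	}
	return 0;
}

Run standalone this prints "process 2 bytes starting at 6" followed by "process 3 bytes starting at 0", i.e. two passes per wrap, exactly as the loop above iterates.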
@@ -440,16 +465,16 @@ EXPORT_SYMBOL_GPL(gigaset_m10x_input);
 
 /* == data output ========================================================== */
 
-/* Encoding of a PPP packet into an octet stuffed HDLC frame
- * with FCS, opening and closing flags.
+/*
+ * Encode a data packet into an octet stuffed HDLC frame with FCS,
+ * opening and closing flags, preserving headroom data.
  * parameters:
- *	skb	skb containing original packet (freed upon return)
- *	head	number of headroom bytes to allocate in result skb
- *	tail	number of tailroom bytes to allocate in result skb
+ *	skb		skb containing original packet (freed upon return)
  * Return value:
  *	pointer to newly allocated skb containing the result frame
+ *	and the original link layer header, NULL on error
  */
-static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
+static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
 {
 	struct sk_buff *hdlc_skb;
 	__u16 fcs;
@@ -471,16 +496,19 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
 
 	/* size of new buffer: original size + number of stuffing bytes
 	 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
+	 * + room for link layer header
 	 */
-	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head);
+	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
 	if (!hdlc_skb) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NULL;
 	}
-	skb_reserve(hdlc_skb, head);
 
-	/* Copy acknowledge request into new skb */
-	memcpy(hdlc_skb->head, skb->head, 2);
+	/* Copy link layer header into new skb */
+	skb_reset_mac_header(hdlc_skb);
+	skb_reserve(hdlc_skb, skb->mac_len);
+	memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
+	hdlc_skb->mac_len = skb->mac_len;
 
 	/* Add flag sequence in front of everything.. */
 	*(skb_put(hdlc_skb, 1)) = PPP_FLAG;
@@ -511,33 +539,42 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
 
 	*(skb_put(hdlc_skb, 1)) = PPP_FLAG;
 
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return hdlc_skb;
 }
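
For reference, the octet stuffing applied by the encoder follows the rule documented in muststuff() at the top of this file: escape every byte below 0x20 plus the flag and escape characters themselves. A minimal userspace sketch of that stuffing step only (must_stuff/hdlc_stuff are local names; FCS and flag handling are shown separately above):

#include <stdint.h>
#include <stddef.h>

#define PPP_FLAG   0x7e
#define PPP_ESCAPE 0x7d
#define PPP_TRANS  0x20

/* same rule as muststuff() above: control chars, flag and escape byte */
static int must_stuff(uint8_t c)
{
	return c < 0x20 || c == PPP_FLAG || c == PPP_ESCAPE;
}

/* copy src to dst with octet stuffing; returns the number of bytes written
 * (dst must provide room for the worst case of 2 * len bytes)
 */
size_t hdlc_stuff(const uint8_t *src, size_t len, uint8_t *dst)
{
	size_t out = 0;

	while (len--) {
		uint8_t c = *src++;

		if (must_stuff(c)) {
			dst[out++] = PPP_ESCAPE;	/* insert escape */
			c ^= PPP_TRANS;			/* receiver XORs back */
		}
		dst[out++] = c;
	}
	return out;
}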
 
-/* Encoding of a raw packet into an octet stuffed bit inverted frame
+/*
+ * Encode a data packet into an octet stuffed raw bit inverted frame,
+ * preserving headroom data.
  * parameters:
- *	skb	skb containing original packet (freed upon return)
- *	head	number of headroom bytes to allocate in result skb
- *	tail	number of tailroom bytes to allocate in result skb
+ *	skb		skb containing original packet (freed upon return)
  * Return value:
  *	pointer to newly allocated skb containing the result frame
+ *	and the original link layer header, NULL on error
  */
-static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
+static struct sk_buff *iraw_encode(struct sk_buff *skb)
 {
 	struct sk_buff *iraw_skb;
 	unsigned char c;
 	unsigned char *cp;
 	int len;
 
-	/* worst case: every byte must be stuffed */
-	iraw_skb = dev_alloc_skb(2*skb->len + tail + head);
+	/* size of new buffer (worst case = every byte must be stuffed):
+	 * 2 * original size + room for link layer header
+	 */
+	iraw_skb = dev_alloc_skb(2*skb->len + skb->mac_len);
 	if (!iraw_skb) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NULL;
 	}
-	skb_reserve(iraw_skb, head);
 
+	/* copy link layer header into new skb */
+	skb_reset_mac_header(iraw_skb);
+	skb_reserve(iraw_skb, skb->mac_len);
+	memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
+	iraw_skb->mac_len = skb->mac_len;
+
+	/* copy and stuff data */
 	cp = skb->data;
 	len = skb->len;
 	while (len--) {
@@ -546,7 +583,7 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
 			*(skb_put(iraw_skb, 1)) = c;
 		*(skb_put(iraw_skb, 1)) = c;
 	}
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return iraw_skb;
 }
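
In the raw ("octet stuffed bit inverted") encoder, the loop visible above duplicates a byte when it equals the DLE character, so the receiver's DLE unquoting restores a single DLE. A hedged sketch of that doubling idea (dle_stuff is a local name, not the driver's):

#include <stdint.h>
#include <stddef.h>

#define DLE_FLAG 0x10			/* ASCII DLE */

/* copy src to dst, doubling DLE bytes; returns the output length
 * (dst must hold up to 2 * len bytes in the worst case)
 */
size_t dle_stuff(const uint8_t *src, size_t len, uint8_t *dst)
{
	size_t out = 0;

	while (len--) {
		uint8_t c = *src++;

		if (c == DLE_FLAG)
			dst[out++] = c;	/* duplicate, receiver unquotes it */
		dst[out++] = c;
	}
	return out;
}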
 
@@ -555,8 +592,10 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
  * @bcs:	B channel descriptor structure.
  * @skb:	data to send.
  *
- * Called by i4l.c to encode and queue an skb for sending, and start
+ * Called by LL to encode and queue an skb for sending, and start
  * transmission if necessary.
+ * Once the payload data has been transmitted completely, gigaset_skb_sent()
+ * will be called with the skb's link layer header preserved.
  *
  * Return value:
  *	number of bytes accepted for sending (skb->len) if ok,
@@ -564,24 +603,25 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
  */
 int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
 {
+	struct cardstate *cs = bcs->cs;
 	unsigned len = skb->len;
 	unsigned long flags;
 
-	if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
-		skb = HDLC_Encode(skb, HW_HDR_LEN, 0);
+	if (bcs->proto2 == L2_HDLC)
+		skb = HDLC_Encode(skb);
 	else
-		skb = iraw_encode(skb, HW_HDR_LEN, 0);
+		skb = iraw_encode(skb);
 	if (!skb) {
-		dev_err(bcs->cs->dev,
+		dev_err(cs->dev,
 			"unable to allocate memory for encoding!\n");
 		return -ENOMEM;
 	}
 
 	skb_queue_tail(&bcs->squeue, skb);
-	spin_lock_irqsave(&bcs->cs->lock, flags);
-	if (bcs->cs->connected)
-		tasklet_schedule(&bcs->cs->write_tasklet);
-	spin_unlock_irqrestore(&bcs->cs->lock, flags);
+	spin_lock_irqsave(&cs->lock, flags);
+	if (cs->connected)
+		tasklet_schedule(&cs->write_tasklet);
+	spin_unlock_irqrestore(&cs->lock, flags);
 
 	return len;	/* ok so far */
 }
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 5ed1d99eb9f3..95ebc5129895 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
 #define USB_SX353_PRODUCT_ID    0x0022
 
 /* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table [] = {
+static const struct usb_device_id gigaset_table[] = {
 	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
 	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
 	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
@@ -137,7 +137,7 @@ struct bas_cardstate {
 #define BS_RESETTING	0x200	/* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
 
 
-static struct gigaset_driver *driver = NULL;
+static struct gigaset_driver *driver;
 
 /* usb specific object needed to register this driver with the usb subsystem */
 static struct usb_driver gigaset_usb_driver = {
@@ -601,11 +601,12 @@ static int atread_submit(struct cardstate *cs, int timeout)
 	ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
 	usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
 			     usb_rcvctrlpipe(ucs->udev, 0),
-			     (unsigned char*) & ucs->dr_cmd_in,
+			     (unsigned char *) &ucs->dr_cmd_in,
 			     ucs->rcvbuf, ucs->rcvbuf_size,
 			     read_ctrl_callback, cs->inbuf);
 
-	if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) {
+	ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
+	if (ret != 0) {
 		update_basstate(ucs, 0, BS_ATRDPEND);
 		dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
 			get_usb_rcmsg(ret));
@@ -652,13 +653,11 @@ static void read_int_callback(struct urb *urb)
 		return;
 	case -ENODEV:			/* device removed */
 	case -ESHUTDOWN:		/* device shut down */
-		//FIXME use this as disconnect indicator?
 		gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__);
 		return;
 	default:		/* severe trouble */
 		dev_warn(cs->dev, "interrupt read: %s\n",
 			 get_usb_statmsg(status));
-		//FIXME corrective action? resubmission always ok?
 		goto resubmit;
 	}
 
@@ -742,7 +741,8 @@ static void read_int_callback(struct urb *urb)
 			kfree(ucs->rcvbuf);
 			ucs->rcvbuf_size = 0;
 		}
-		if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) {
+		ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
+		if (ucs->rcvbuf == NULL) {
 			spin_unlock_irqrestore(&cs->lock, flags);
 			dev_err(cs->dev, "out of memory receiving AT data\n");
 			error_reset(cs);
@@ -750,12 +750,12 @@ static void read_int_callback(struct urb *urb)
 		}
 		ucs->rcvbuf_size = l;
 		ucs->retry_cmd_in = 0;
-		if ((rc = atread_submit(cs, BAS_TIMEOUT)) < 0) {
+		rc = atread_submit(cs, BAS_TIMEOUT);
+		if (rc < 0) {
 			kfree(ucs->rcvbuf);
 			ucs->rcvbuf = NULL;
 			ucs->rcvbuf_size = 0;
 			if (rc != -ENODEV) {
-				//FIXME corrective action?
 				spin_unlock_irqrestore(&cs->lock, flags);
 				error_reset(cs);
 				break;
@@ -911,7 +911,7 @@ static int starturbs(struct bc_state *bcs)
 	int rc;
 
 	/* initialize L2 reception */
-	if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
+	if (bcs->proto2 == L2_HDLC)
 		bcs->inputstate |= INS_flag_hunt;
 
 	/* submit all isochronous input URBs */
@@ -940,7 +940,8 @@ static int starturbs(struct bc_state *bcs)
 		}
 
 		dump_urb(DEBUG_ISO, "Initial isoc read", urb);
-		if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
+		rc = usb_submit_urb(urb, GFP_ATOMIC);
+		if (rc != 0)
 			goto error;
 	}
 
@@ -1045,7 +1046,8 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
 
 		/* compute frame length according to flow control */
 		ifd->length = BAS_NORMFRAME;
-		if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) {
+		corrbytes = atomic_read(&ubc->corrbytes);
+		if (corrbytes != 0) {
 			gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
 				__func__, corrbytes);
 			if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
@@ -1064,7 +1066,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
 					"%s: buffer busy at frame %d",
 					__func__, nframe);
 				/* tasklet will be restarted from
-				   gigaset_send_skb() */
+				   gigaset_isoc_send_skb() */
 			} else {
 				dev_err(ucx->bcs->cs->dev,
 					"%s: buffer error %d at frame %d\n",
@@ -1284,7 +1286,8 @@ static void read_iso_tasklet(unsigned long data)
 	for (;;) {
 		/* retrieve URB */
 		spin_lock_irqsave(&ubc->isoinlock, flags);
-		if (!(urb = ubc->isoindone)) {
+		urb = ubc->isoindone;
+		if (!urb) {
 			spin_unlock_irqrestore(&ubc->isoinlock, flags);
 			return;
 		}
@@ -1371,7 +1374,7 @@ static void read_iso_tasklet(unsigned long data)
 				 "isochronous read: %d data bytes missing\n",
 				 totleft);
 
-	error:
+error:
 		/* URB processed, resubmit */
 		for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
 			urb->iso_frame_desc[frame].status = 0;
@@ -1568,7 +1571,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
 	ucs->dr_ctrl.wLength = 0;
 	usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
 			     usb_sndctrlpipe(ucs->udev, 0),
-			     (unsigned char*) &ucs->dr_ctrl, NULL, 0,
+			     (unsigned char *) &ucs->dr_ctrl, NULL, 0,
 			     write_ctrl_callback, ucs);
 	ucs->retry_ctrl = 0;
 	ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
@@ -1621,7 +1624,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
 		return -EHOSTUNREACH;
 	}
 
-	if ((ret = starturbs(bcs)) < 0) {
+	ret = starturbs(bcs);
+	if (ret < 0) {
 		dev_err(cs->dev,
 			"could not start isochronous I/O for channel B%d: %s\n",
 			bcs->channel + 1,
@@ -1633,7 +1637,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
 	}
 
 	req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
-	if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
+	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
+	if (ret < 0) {
 		dev_err(cs->dev, "could not open channel B%d\n",
 			bcs->channel + 1);
 		stopurbs(bcs->hw.bas);
@@ -1677,7 +1682,8 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
 
 	/* channel running: tell device to close it */
 	req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
-	if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
+	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
+	if (ret < 0)
 		dev_err(cs->dev, "closing channel B%d failed\n",
 			bcs->channel + 1);
 
@@ -1703,10 +1709,12 @@ static void complete_cb(struct cardstate *cs)
 	gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD,
 		"write_command: sent %u bytes, %u left",
 		cs->curlen, cs->cmdbytes);
-	if ((cs->cmdbuf = cb->next) != NULL) {
+	if (cb->next != NULL) {
+		cs->cmdbuf = cb->next;
 		cs->cmdbuf->prev = NULL;
 		cs->curlen = cs->cmdbuf->len;
 	} else {
+		cs->cmdbuf = NULL;
 		cs->lastcmdbuf = NULL;
 		cs->curlen = 0;
 	}
@@ -1833,7 +1841,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
 	ucs->dr_cmd_out.wLength = cpu_to_le16(len);
 	usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
 			     usb_sndctrlpipe(ucs->udev, 0),
-			     (unsigned char*) &ucs->dr_cmd_out, buf, len,
+			     (unsigned char *) &ucs->dr_cmd_out, buf, len,
 			     write_command_callback, cs);
 	rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
 	if (unlikely(rc)) {
@@ -1953,7 +1961,8 @@ static int gigaset_write_cmd(struct cardstate *cs,
 
 	if (len > IF_WRITEBUF)
 		len = IF_WRITEBUF;
-	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+	cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
+	if (!cb) {
 		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		rc = -ENOMEM;
 		goto notqueued;
@@ -2100,14 +2109,15 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 	}
 	ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
 	ubc->numsub = 0;
-	if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) {
+	ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
+	if (!ubc->isooutbuf) {
 		pr_err("out of memory\n");
 		kfree(ubc);
 		bcs->hw.bas = NULL;
 		return 0;
 	}
 	tasklet_init(&ubc->sent_tasklet,
-		     &write_iso_tasklet, (unsigned long) bcs);
+		     write_iso_tasklet, (unsigned long) bcs);
 
 	spin_lock_init(&ubc->isoinlock);
 	for (i = 0; i < BAS_INURBS; ++i)
@@ -2128,7 +2138,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 	ubc->shared0s = 0;
 	ubc->stolen0s = 0;
 	tasklet_init(&ubc->rcvd_tasklet,
-		     &read_iso_tasklet, (unsigned long) bcs);
+		     read_iso_tasklet, (unsigned long) bcs);
 	return 1;
 }
 
@@ -2252,7 +2262,8 @@ static int gigaset_probe(struct usb_interface *interface,
 		gig_dbg(DEBUG_ANY,
 			"%s: wrong alternate setting %d - trying to switch",
 			__func__, hostif->desc.bAlternateSetting);
-		if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) {
+		if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
+		    < 0) {
 			dev_warn(&udev->dev, "usb_set_interface failed, "
 				 "device %d interface %d altsetting %d\n",
 				 udev->devnum, hostif->desc.bInterfaceNumber,
@@ -2321,14 +2332,16 @@ static int gigaset_probe(struct usb_interface *interface,
 					(endpoint->bEndpointAddress) & 0x0f),
 			 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
 			 endpoint->bInterval);
-	if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
+	rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
+	if (rc != 0) {
 		dev_err(cs->dev, "could not submit interrupt URB: %s\n",
 			get_usb_rcmsg(rc));
 		goto error;
 	}
 
 	/* tell the device that the driver is ready */
-	if ((rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0)
+	rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
+	if (rc != 0)
 		goto error;
 
 	/* tell common part that the device is ready */
@@ -2524,9 +2537,10 @@ static int __init bas_gigaset_init(void)
 	int result;
 
 	/* allocate memory for our driver state and intialize it */
-	if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				       GIGASET_MODULENAME, GIGASET_DEVNAME,
-				       &gigops, THIS_MODULE)) == NULL)
+	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+				    GIGASET_MODULENAME, GIGASET_DEVNAME,
+				    &gigops, THIS_MODULE);
+	if (driver == NULL)
 		goto error;
 
 	/* register this driver with the USB subsystem */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
new file mode 100644
index 000000000000..3f5cd06af104
--- /dev/null
+++ b/drivers/isdn/gigaset/capi.c
@@ -0,0 +1,2292 @@
+/*
+ * Kernel CAPI interface for the Gigaset driver
+ *
+ * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License as
+ *	published by the Free Software Foundation; either version 2 of
+ *	the License, or (at your option) any later version.
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/ctype.h>
+#include <linux/isdn/capilli.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+
+/* missing from kernelcapi.h */
+#define CapiNcpiNotSupportedByProtocol	0x0001
+#define CapiFlagsNotSupportedByProtocol	0x0002
+#define CapiAlertAlreadySent		0x0003
+#define CapiFacilitySpecificFunctionNotSupported	0x3011
+
+/* missing from capicmd.h */
+#define CAPI_CONNECT_IND_BASELEN	(CAPI_MSG_BASELEN+4+2+8*1)
+#define CAPI_CONNECT_ACTIVE_IND_BASELEN	(CAPI_MSG_BASELEN+4+3*1)
+#define CAPI_CONNECT_B3_IND_BASELEN	(CAPI_MSG_BASELEN+4+1)
+#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN	(CAPI_MSG_BASELEN+4+1)
+#define CAPI_DATA_B3_REQ_LEN64		(CAPI_MSG_BASELEN+4+4+2+2+2+8)
+#define CAPI_DATA_B3_CONF_LEN		(CAPI_MSG_BASELEN+4+2+2)
+#define CAPI_DISCONNECT_IND_LEN		(CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_IND_BASELEN	(CAPI_MSG_BASELEN+4+2+1)
+#define CAPI_FACILITY_CONF_BASELEN	(CAPI_MSG_BASELEN+4+2+2+1)
+/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
+#define CAPI_STDCONF_LEN		(CAPI_MSG_BASELEN+4+2)
+
+#define CAPI_FACILITY_HANDSET	0x0000
+#define CAPI_FACILITY_DTMF	0x0001
+#define CAPI_FACILITY_V42BIS	0x0002
+#define CAPI_FACILITY_SUPPSVC	0x0003
+#define CAPI_FACILITY_WAKEUP	0x0004
+#define CAPI_FACILITY_LI	0x0005
+
+#define CAPI_SUPPSVC_GETSUPPORTED	0x0000
+
+/* missing from capiutil.h */
+#define CAPIMSG_PLCI_PART(m)	CAPIMSG_U8(m, 9)
+#define CAPIMSG_NCCI_PART(m)	CAPIMSG_U16(m, 10)
+#define CAPIMSG_HANDLE_REQ(m)	CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
+#define CAPIMSG_FLAGS(m)	CAPIMSG_U16(m, 20)
+#define CAPIMSG_SETCONTROLLER(m, contr)	capimsg_setu8(m, 8, contr)
+#define CAPIMSG_SETPLCI_PART(m, plci)	capimsg_setu8(m, 9, plci)
+#define CAPIMSG_SETNCCI_PART(m, ncci)	capimsg_setu16(m, 10, ncci)
+#define CAPIMSG_SETFLAGS(m, flags)	capimsg_setu16(m, 20, flags)
+
+/* parameters with differing location in DATA_B3_CONF/_RESP: */
+#define CAPIMSG_SETHANDLE_CONF(m, handle)	capimsg_setu16(m, 12, handle)
+#define	CAPIMSG_SETINFO_CONF(m, info)		capimsg_setu16(m, 14, info)
+
+/* Flags (DATA_B3_REQ/_IND) */
+#define CAPI_FLAGS_DELIVERY_CONFIRMATION	0x04
+#define CAPI_FLAGS_RESERVED			(~0x1f)
+
+/* buffer sizes */
+#define MAX_BC_OCTETS 11
+#define MAX_HLC_OCTETS 3
+#define MAX_NUMBER_DIGITS 20
+#define MAX_FMT_IE_LEN 20
+
+/* values for gigaset_capi_appl.connected */
+#define APCONN_NONE	0	/* inactive/listening */
+#define APCONN_SETUP	1	/* connecting */
+#define APCONN_ACTIVE	2	/* B channel up */
+
+/* registered application data structure */
+struct gigaset_capi_appl {
+	struct list_head ctrlist;
+	struct gigaset_capi_appl *bcnext;
+	u16 id;
+	u16 nextMessageNumber;
+	u32 listenInfoMask;
+	u32 listenCIPmask;
+	int connected;
+};
+
+/* CAPI specific controller data structure */
+struct gigaset_capi_ctr {
+	struct capi_ctr ctr;
+	struct list_head appls;
+	struct sk_buff_head sendqueue;
+	atomic_t sendqlen;
+	/* two _cmsg structures possibly used concurrently: */
+	_cmsg hcmsg;	/* for message composition triggered from hardware */
+	_cmsg acmsg;	/* for dissection of messages sent from application */
+	u8 bc_buf[MAX_BC_OCTETS+1];
+	u8 hlc_buf[MAX_HLC_OCTETS+1];
+	u8 cgpty_buf[MAX_NUMBER_DIGITS+3];
+	u8 cdpty_buf[MAX_NUMBER_DIGITS+2];
+};
+
+/* CIP Value table (from CAPI 2.0 standard, ch. 6.1) */
+static struct {
+	u8 *bc;
+	u8 *hlc;
+} cip2bchlc[] = {
+	[1] = { "8090A3", NULL },
+		/* Speech (A-law) */
+	[2] = { "8890", NULL },
+		/* Unrestricted digital information */
+	[3] = { "8990", NULL },
+		/* Restricted digital information */
+	[4] = { "9090A3", NULL },
+		/* 3,1 kHz audio (A-law) */
+	[5] = { "9190", NULL },
+		/* 7 kHz audio */
+	[6] = { "9890", NULL },
+		/* Video */
+	[7] = { "88C0C6E6", NULL },
+		/* Packet mode */
+	[8] = { "8890218F", NULL },
+		/* 56 kbit/s rate adaptation */
+	[9] = { "9190A5", NULL },
+		/* Unrestricted digital information with tones/announcements */
+	[16] = { "8090A3", "9181" },
+		/* Telephony */
+	[17] = { "9090A3", "9184" },
+		/* Group 2/3 facsimile */
+	[18] = { "8890", "91A1" },
+		/* Group 4 facsimile Class 1 */
+	[19] = { "8890", "91A4" },
+		/* Teletex service basic and mixed mode
+		   and Group 4 facsimile service Classes II and III */
+	[20] = { "8890", "91A8" },
+		/* Teletex service basic and processable mode */
+	[21] = { "8890", "91B1" },
+		/* Teletex service basic mode */
+	[22] = { "8890", "91B2" },
+		/* International interworking for Videotex */
+	[23] = { "8890", "91B5" },
+		/* Telex */
+	[24] = { "8890", "91B8" },
+		/* Message Handling Systems in accordance with X.400 */
+	[25] = { "8890", "91C1" },
+		/* OSI application in accordance with X.200 */
+	[26] = { "9190A5", "9181" },
+		/* 7 kHz telephony */
+	[27] = { "9190A5", "916001" },
+		/* Video telephony, first connection */
+	[28] = { "8890", "916002" },
+		/* Video telephony, second connection */
+};
+
+/*
+ * helper functions
+ * ================
+ */
+
+/*
+ * emit unsupported parameter warning
+ */
+static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
+				       char *msgname, char *paramname)
+{
+	if (param && *param)
+		dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
+			 msgname, paramname);
+}
+
+/*
+ * check for legal hex digit
+ */
+static inline int ishexdigit(char c)
+{
+	if (c >= '0' && c <= '9')
+		return 1;
+	if (c >= 'A' && c <= 'F')
+		return 1;
+	if (c >= 'a' && c <= 'f')
+		return 1;
+	return 0;
+}
+
+/*
+ * convert hex to binary
+ */
+static inline u8 hex2bin(char c)
+{
+	int result = c & 0x0f;
+	if (c & 0x40)
+		result += 9;
+	return result;
+}
+
+/*
+ * convert an IE from Gigaset hex string to ETSI binary representation
+ * including length byte
+ * return value: result length, -1 on error
+ */
+static int encode_ie(char *in, u8 *out, int maxlen)
+{
+	int l = 0;
+	while (*in) {
+		if (!ishexdigit(in[0]) || !ishexdigit(in[1]) || l >= maxlen)
+			return -1;
+		out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
+		in += 2;
+	}
+	out[0] = l;
+	return l;
+}
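
The Gigaset reports information elements as hex strings such as "8090A3", which encode_ie() turns into the ETSI wire form with a leading length octet. A standalone demonstration of the same conversion, using the identical bit trick as hex2bin() (hexstr_to_ie is a local stand-in):

#include <stdio.h>
#include <stdint.h>
#include <ctype.h>

/* same idea as encode_ie()/hex2bin() above, in plain userspace C */
static int hexstr_to_ie(const char *in, uint8_t *out, int maxlen)
{
	int l = 0;

	while (*in) {
		if (!isxdigit((unsigned char)in[0]) ||
		    !isxdigit((unsigned char)in[1]) || l >= maxlen)
			return -1;
		out[++l] = ((in[0] & 0x0f) + (in[0] & 0x40 ? 9 : 0)) << 4 |
			   ((in[1] & 0x0f) + (in[1] & 0x40 ? 9 : 0));
		in += 2;
	}
	out[0] = l;	/* ETSI IEs carry their length in the first octet */
	return l;
}

int main(void)
{
	uint8_t buf[12];

	if (hexstr_to_ie("8090A3", buf, 11) == 3)	/* speech, A-law BC */
		printf("%02x %02x %02x %02x\n",
		       buf[0], buf[1], buf[2], buf[3]);	/* 03 80 90 a3 */
	return 0;
}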
+
+/*
+ * convert an IE from ETSI binary representation including length byte
+ * to Gigaset hex string
+ */
+static void decode_ie(u8 *in, char *out)
+{
+	int i = *in;
+	while (i-- > 0) {
+		/* ToDo: conversion to upper case necessary? */
+		*out++ = toupper(hex_asc_hi(*++in));
+		*out++ = toupper(hex_asc_lo(*in));
+	}
+}
+
+/*
+ * retrieve application data structure for an application ID
+ */
+static inline struct gigaset_capi_appl *
+get_appl(struct gigaset_capi_ctr *iif, u16 appl)
+{
+	struct gigaset_capi_appl *ap;
+
+	list_for_each_entry(ap, &iif->appls, ctrlist)
+		if (ap->id == appl)
+			return ap;
+	return NULL;
+}
+
+/*
+ * dump CAPI message to kernel messages for debugging
+ */
+static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+	_cdebbuf *cdb;
+
+	if (!(gigaset_debuglevel & level))
+		return;
+
+	cdb = capi_cmsg2str(p);
+	if (cdb) {
+		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
+		cdebbuf_free(cdb);
+	} else {
+		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
+			capi_cmd2str(p->Command, p->Subcommand));
+	}
+#endif
+}
+
+static inline void dump_rawmsg(enum debuglevel level, const char *tag,
+			       unsigned char *data)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+	char *dbgline;
+	int i, l;
+
+	if (!(gigaset_debuglevel & level))
+		return;
+
+	l = CAPIMSG_LEN(data);
+	if (l < 12) {
+		gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
+		return;
+	}
+	gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
+		tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
+		CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
+		CAPIMSG_CONTROL(data));
+	l -= 12;
+	dbgline = kmalloc(3*l, GFP_ATOMIC);
+	if (!dbgline)
+		return;
+	for (i = 0; i < l; i++) {
+		dbgline[3*i] = hex_asc_hi(data[12+i]);
+		dbgline[3*i+1] = hex_asc_lo(data[12+i]);
+		dbgline[3*i+2] = ' ';
+	}
+	dbgline[3*l-1] = '\0';
+	gig_dbg(level, "  %s", dbgline);
+	kfree(dbgline);
+	if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
+	    (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
+	     CAPIMSG_SUBCOMMAND(data) == CAPI_IND) &&
+	    CAPIMSG_DATALEN(data) > 0) {
+		l = CAPIMSG_DATALEN(data);
+		dbgline = kmalloc(3*l, GFP_ATOMIC);
+		if (!dbgline)
+			return;
+		data += CAPIMSG_LEN(data);
+		for (i = 0; i < l; i++) {
+			dbgline[3*i] = hex_asc_hi(data[i]);
+			dbgline[3*i+1] = hex_asc_lo(data[i]);
+			dbgline[3*i+2] = ' ';
+		}
+		dbgline[3*l-1] = '\0';
+		gig_dbg(level, "  %s", dbgline);
+		kfree(dbgline);
+	}
+#endif
+}
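
The dump above allocates three characters per byte (two hex digits plus a space) and replaces the final space with the terminating NUL. A small userspace sketch of that layout, with hexline() as a local stand-in:

#include <stdio.h>
#include <stdlib.h>

/* build a "12 00 80 05" style dump line (caller frees the result) */
static char *hexline(const unsigned char *data, int l)
{
	static const char hexdig[] = "0123456789abcdef";
	char *line;

	if (l <= 0)
		return NULL;
	line = malloc(3 * l);
	if (!line)
		return NULL;
	for (int i = 0; i < l; i++) {
		line[3 * i]     = hexdig[data[i] >> 4];
		line[3 * i + 1] = hexdig[data[i] & 0x0f];
		line[3 * i + 2] = ' ';
	}
	line[3 * l - 1] = '\0';	/* final space becomes the terminator */
	return line;
}

int main(void)
{
	unsigned char msg[] = { 0x12, 0x00, 0x80, 0x05 };
	char *s = hexline(msg, sizeof(msg));

	if (s) {
		puts(s);		/* prints "12 00 80 05" */
		free(s);
	}
	return 0;
}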
+
+/*
+ * format CAPI IE as string
+ */
+
+static const char *format_ie(const char *ie)
+{
+	static char result[3*MAX_FMT_IE_LEN];
+	int len, count;
+	char *pout = result;
+
+	if (!ie)
+		return "NULL";
+
+	count = len = ie[0];
+	if (count > MAX_FMT_IE_LEN)
+		count = MAX_FMT_IE_LEN-1;
+	while (count--) {
+		*pout++ = hex_asc_hi(*++ie);
+		*pout++ = hex_asc_lo(*ie);
+		*pout++ = ' ';
+	}
+	if (len > MAX_FMT_IE_LEN) {
+		*pout++ = '.';
+		*pout++ = '.';
+		*pout++ = '.';
+	}
+	*--pout = 0;
+	return result;
+}
+
+
+/*
+ * driver interface functions
+ * ==========================
+ */
+
+/**
+ * gigaset_skb_sent() - acknowledge transmission of outgoing skb
+ * @bcs:	B channel descriptor structure.
+ * @skb:	sent data.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when the data in a
+ * skb has been successfully sent, for signalling completion to the LL.
+ */
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct gigaset_capi_appl *ap = bcs->ap;
+	unsigned char *req = skb_mac_header(dskb);
+	struct sk_buff *cskb;
+	u16 flags;
+
+	/* update statistics */
+	++bcs->trans_up;
+
+	if (!ap) {
+		dev_err(cs->dev, "%s: no application\n", __func__);
+		return;
+	}
+
+	/* don't send further B3 messages if disconnected */
+	if (ap->connected < APCONN_ACTIVE) {
+		gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
+		return;
+	}
+
+	/* ToDo: honor unset "delivery confirmation" bit */
+	flags = CAPIMSG_FLAGS(req);
+
+	/* build DATA_B3_CONF message */
+	cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
+	if (!cskb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	/* frequent message, avoid _cmsg overhead */
+	CAPIMSG_SETLEN(cskb->data, CAPI_DATA_B3_CONF_LEN);
+	CAPIMSG_SETAPPID(cskb->data, ap->id);
+	CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
+	CAPIMSG_SETSUBCOMMAND(cskb->data,  CAPI_CONF);
+	CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
+	CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
+	CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
+	CAPIMSG_SETNCCI_PART(cskb->data, 1);
+	CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
+	if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
+		CAPIMSG_SETINFO_CONF(cskb->data,
+				     CapiFlagsNotSupportedByProtocol);
+	else
+		CAPIMSG_SETINFO_CONF(cskb->data, CAPI_NOERROR);
+
+	/* emit message */
+	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_CONF", cskb->data);
+	capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_sent);
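
Because DATA_B3_CONF is a frequent message, it is assembled at fixed offsets instead of going through the _cmsg helpers. A hedged userspace sketch of that layout, with offsets taken from the CAPIMSG_SET* macros above and command/subcommand codes as defined in capicmd.h; put_u16() and build_data_b3_conf() are local stand-ins, not kernel API:

#include <stdint.h>
#include <string.h>

/* little-endian store, standing in for capimsg_setu16() */
static void put_u16(uint8_t *m, int off, uint16_t v)
{
	m[off] = v & 0xff;
	m[off + 1] = v >> 8;
}

/* lay out a 16-byte DATA_B3_CONF by hand, as the function above does */
void build_data_b3_conf(uint8_t *m, uint16_t applid, uint16_t msgid,
			uint8_t ctrl, uint8_t plci, uint16_t ncci,
			uint16_t handle, uint16_t info)
{
	memset(m, 0, 16);
	put_u16(m, 0, 16);		/* total length */
	put_u16(m, 2, applid);		/* ApplId */
	m[4] = 0x86;			/* Command: DATA_B3 */
	m[5] = 0x81;			/* Subcommand: CONF */
	put_u16(m, 6, msgid);		/* message number, echoed from _REQ */
	m[8] = ctrl;			/* controller number */
	m[9] = plci;			/* PLCI part (B channel + 1) */
	put_u16(m, 10, ncci);		/* NCCI part (always 1 here) */
	put_u16(m, 12, handle);		/* data handle from the _REQ */
	put_u16(m, 14, info);		/* 0 = no error */
}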
+
+/**
+ * gigaset_skb_rcvd() - pass received skb to LL
+ * @bcs:	B channel descriptor structure.
+ * @skb:	received data.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when user data has
+ * been successfully received, for passing to the LL.
+ * Warning: skb must not be accessed anymore!
+ */
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct gigaset_capi_appl *ap = bcs->ap;
+	int len = skb->len;
+
+	/* update statistics */
+	bcs->trans_down++;
+
+	if (!ap) {
+		dev_err(cs->dev, "%s: no application\n", __func__);
+		return;
+	}
+
+	/* don't send further B3 messages if disconnected */
+	if (ap->connected < APCONN_ACTIVE) {
+		gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/*
+	 * prepend DATA_B3_IND message to payload
+	 * Parameters: NCCI = 1, all others 0/unused
+	 * frequent message, avoid _cmsg overhead
+	 */
+	skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+	CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
+	CAPIMSG_SETAPPID(skb->data, ap->id);
+	CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
+	CAPIMSG_SETSUBCOMMAND(skb->data,  CAPI_IND);
+	CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
+	CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
+	CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
+	CAPIMSG_SETNCCI_PART(skb->data, 1);
+	/* Data parameter not used */
+	CAPIMSG_SETDATALEN(skb->data, len);
+	/* Data handle parameter not used */
+	CAPIMSG_SETFLAGS(skb->data, 0);
+	/* Data64 parameter not present */
+
+	/* emit message */
+	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_IND", skb->data);
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
+
+/**
+ * gigaset_isdn_rcv_err() - signal receive error
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when a receive error
+ * has occurred, for signalling to the LL.
+ */
+void gigaset_isdn_rcv_err(struct bc_state *bcs)
+{
+	/* if currently ignoring packets, just count down */
+	if (bcs->ignore) {
+		bcs->ignore--;
+		return;
+	}
+
+	/* update statistics */
+	bcs->corrupted++;
+
+	/* ToDo: signal error -> LL */
+}
+EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
+
+/**
+ * gigaset_isdn_icall() - signal incoming call
+ * @at_state:	connection state structure.
+ *
+ * Called by main module at tasklet level to notify the LL that an incoming
+ * call has been received. @at_state contains the parameters of the call.
+ *
+ * Return value: call disposition (ICALL_*)
+ */
+int gigaset_isdn_icall(struct at_state_t *at_state)
+{
+	struct cardstate *cs = at_state->cs;
+	struct bc_state *bcs = at_state->bcs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct gigaset_capi_appl *ap;
+	u32 actCIPmask;
+	struct sk_buff *skb;
+	unsigned int msgsize;
+	int i;
+
+	/*
+	 * ToDo: signal calls without a free B channel, too
+	 * (requires a u8 handle for the at_state structure that can
+	 * be stored in the PLCI and used in the CONNECT_RESP message
+	 * handler to retrieve it)
+	 */
+	if (!bcs)
+		return ICALL_IGNORE;
+
+	/* prepare CONNECT_IND message, using B channel number as PLCI */
+	capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
+			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
+
+	/* minimum size, all structs empty */
+	msgsize = CAPI_CONNECT_IND_BASELEN;
+
+	/* Bearer Capability (mandatory) */
+	if (at_state->str_var[STR_ZBC]) {
+		/* pass on BC from Gigaset */
+		if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
+			      MAX_BC_OCTETS) < 0) {
+			dev_warn(cs->dev, "RING ignored - bad BC %s\n",
+				 at_state->str_var[STR_ZBC]);
+			return ICALL_IGNORE;
+		}
+
+		/* look up corresponding CIP value */
+		iif->hcmsg.CIPValue = 0;	/* default if nothing found */
+		for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
+			if (cip2bchlc[i].bc != NULL &&
+			    cip2bchlc[i].hlc == NULL &&
+			    !strcmp(cip2bchlc[i].bc,
+				    at_state->str_var[STR_ZBC])) {
+				iif->hcmsg.CIPValue = i;
+				break;
+			}
+	} else {
+		/* no BC (internal call): assume CIP 1 (speech, A-law) */
+		iif->hcmsg.CIPValue = 1;
+		encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
+	}
+	iif->hcmsg.BC = iif->bc_buf;
+	msgsize += iif->hcmsg.BC[0];
+
+	/* High Layer Compatibility (optional) */
+	if (at_state->str_var[STR_ZHLC]) {
+		/* pass on HLC from Gigaset */
+		if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
+			      MAX_HLC_OCTETS) < 0) {
+			dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
+				 at_state->str_var[STR_ZHLC]);
+			return ICALL_IGNORE;
+		}
+		iif->hcmsg.HLC = iif->hlc_buf;
+		msgsize += iif->hcmsg.HLC[0];
+
+		/* look up corresponding CIP value */
+		/* keep BC based CIP value if none found */
+		if (at_state->str_var[STR_ZBC])
+			for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
+				if (cip2bchlc[i].hlc != NULL &&
+				    !strcmp(cip2bchlc[i].hlc,
+					    at_state->str_var[STR_ZHLC]) &&
+				    !strcmp(cip2bchlc[i].bc,
+					    at_state->str_var[STR_ZBC])) {
+					iif->hcmsg.CIPValue = i;
+					break;
+				}
+	}
+
+	/* Called Party Number (optional) */
+	if (at_state->str_var[STR_ZCPN]) {
+		i = strlen(at_state->str_var[STR_ZCPN]);
+		if (i > MAX_NUMBER_DIGITS) {
+			dev_warn(cs->dev, "RING ignored - bad number %s\n",
+				 at_state->str_var[STR_ZBC]);
+			return ICALL_IGNORE;
+		}
+		iif->cdpty_buf[0] = i + 1;
+		iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
+		memcpy(iif->cdpty_buf+2, at_state->str_var[STR_ZCPN], i);
+		iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
+		msgsize += iif->hcmsg.CalledPartyNumber[0];
+	}
+
+	/* Calling Party Number (optional) */
+	if (at_state->str_var[STR_NMBR]) {
+		i = strlen(at_state->str_var[STR_NMBR]);
+		if (i > MAX_NUMBER_DIGITS) {
+			dev_warn(cs->dev, "RING ignored - bad number %s\n",
+				 at_state->str_var[STR_ZBC]);
+			return ICALL_IGNORE;
+		}
+		iif->cgpty_buf[0] = i + 2;
+		iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
+		iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
+		memcpy(iif->cgpty_buf+3, at_state->str_var[STR_NMBR], i);
+		iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
+		msgsize += iif->hcmsg.CallingPartyNumber[0];
+	}
+
+	/* remaining parameters (not supported, always left NULL):
+	 * - CalledPartySubaddress
+	 * - CallingPartySubaddress
+	 * - AdditionalInfo
+	 *   - BChannelinformation
+	 *   - Keypadfacility
+	 *   - Useruserdata
+	 *   - Facilitydataarray
+	 */
+
+	gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
+		iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
+		format_ie(iif->hcmsg.BC));
+	gig_dbg(DEBUG_CMD, "icall: HLC %s",
+		format_ie(iif->hcmsg.HLC));
+	gig_dbg(DEBUG_CMD, "icall: CgPty %s",
+		format_ie(iif->hcmsg.CallingPartyNumber));
+	gig_dbg(DEBUG_CMD, "icall: CdPty %s",
+		format_ie(iif->hcmsg.CalledPartyNumber));
+
+	/* scan application list for matching listeners */
+	bcs->ap = NULL;
+	actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
+	list_for_each_entry(ap, &iif->appls, ctrlist)
+		if (actCIPmask & ap->listenCIPmask) {
+			/* build CONNECT_IND message for this application */
+			iif->hcmsg.ApplId = ap->id;
+			iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
+
+			skb = alloc_skb(msgsize, GFP_ATOMIC);
+			if (!skb) {
+				dev_err(cs->dev, "%s: out of memory\n",
+					__func__);
+				break;
+			}
+			capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
+			dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+
+			/* add to listeners on this B channel, update state */
+			ap->bcnext = bcs->ap;
+			bcs->ap = ap;
+			bcs->chstate |= CHS_NOTIFY_LL;
+			ap->connected = APCONN_SETUP;
+
+			/* emit message */
+			capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+		}
+
+	/*
+	 * Return "accept" if any listeners.
+	 * Gigaset will send ALERTING.
+	 * There doesn't seem to be a way to avoid this.
+	 */
+	return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
+}
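
The listener scan above builds actCIPmask as 1 | (1 << CIPValue): bit 0 of an application's listen mask accepts any call, bit n accepts CIP value n. A tiny standalone illustration of that test (cip_matches is a local helper, the masks in main() are made up):

#include <stdio.h>
#include <stdint.h>

/* bit 0 of the listen mask accepts any call, bit n accepts CIP value n */
static int cip_matches(uint32_t listen_mask, unsigned int cip)
{
	uint32_t act_mask = 1u | (1u << cip);

	return (act_mask & listen_mask) != 0;
}

int main(void)
{
	printf("%d\n", cip_matches(1u << 16, 16));	/* 1: telephony only */
	printf("%d\n", cip_matches(1u, 4));		/* 1: "any call" bit */
	printf("%d\n", cip_matches(1u << 16, 4));	/* 0: no match */
	return 0;
}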
+
+/*
+ * send a DISCONNECT_IND message to an application
+ * does not sleep, clobbers the controller's hcmsg structure
+ */
+static void send_disconnect_ind(struct bc_state *bcs,
+				struct gigaset_capi_appl *ap, u16 reason)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct sk_buff *skb;
+
+	if (ap->connected == APCONN_NONE)
+		return;
+
+	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
+			 ap->nextMessageNumber++,
+			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
+	iif->hcmsg.Reason = reason;
+	skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
+	if (!skb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN));
+	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+	ap->connected = APCONN_NONE;
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/*
+ * send a DISCONNECT_B3_IND message to an application
+ * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
+ * does not sleep, clobbers the controller's hcmsg structure
+ */
+static void send_disconnect_b3_ind(struct bc_state *bcs,
+				   struct gigaset_capi_appl *ap)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct sk_buff *skb;
+
+	/* nothing to do if no logical connection active */
+	if (ap->connected < APCONN_ACTIVE)
+		return;
+	ap->connected = APCONN_SETUP;
+
+	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
+			 ap->nextMessageNumber++,
+			 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
+	skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
+	if (!skb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	capi_cmsg2message(&iif->hcmsg,
+			  __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN));
+	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/**
+ * gigaset_isdn_connD() - signal D channel connect
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module at tasklet level to notify the LL that the D channel
+ * connection has been established.
+ */
+void gigaset_isdn_connD(struct bc_state *bcs)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct gigaset_capi_appl *ap = bcs->ap;
+	struct sk_buff *skb;
+	unsigned int msgsize;
+
+	if (!ap) {
+		dev_err(cs->dev, "%s: no application\n", __func__);
+		return;
+	}
+	while (ap->bcnext) {
+		/* this should never happen */
+		dev_warn(cs->dev, "%s: dropping extra application %u\n",
+			 __func__, ap->bcnext->id);
+		send_disconnect_ind(bcs, ap->bcnext,
+				    CapiCallGivenToOtherApplication);
+		ap->bcnext = ap->bcnext->bcnext;
+	}
+	if (ap->connected == APCONN_NONE) {
+		dev_warn(cs->dev, "%s: application %u not connected\n",
+			 __func__, ap->id);
+		return;
+	}
+
+	/* prepare CONNECT_ACTIVE_IND message
+	 * Note: LLC not supported by device
+	 */
+	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
+			 ap->nextMessageNumber++,
+			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
+
+	/* minimum size, all structs empty */
+	msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;
+
+	/* ToDo: set parameter: Connected number
+	 * (requires ev-layer state machine extension to collect
+	 * ZCON device reply)
+	 */
+
+	/* build and emit CONNECT_ACTIVE_IND message */
+	skb = alloc_skb(msgsize, GFP_ATOMIC);
+	if (!skb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
+	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/**
+ * gigaset_isdn_hupD() - signal D channel hangup
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module at tasklet level to notify the LL that the D channel
+ * connection has been shut down.
+ */
+void gigaset_isdn_hupD(struct bc_state *bcs)
+{
+	struct gigaset_capi_appl *ap;
+
+	/*
+	 * ToDo: pass on reason code reported by device
+	 * (requires ev-layer state machine extension to collect
+	 * ZCAU device reply)
+	 */
+	for (ap = bcs->ap; ap != NULL; ap = ap->bcnext) {
+		send_disconnect_b3_ind(bcs, ap);
+		send_disconnect_ind(bcs, ap, 0);
+	}
+	bcs->ap = NULL;
+}
+
+/**
+ * gigaset_isdn_connB() - signal B channel connect
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module at tasklet level to notify the LL that the B channel
+ * connection has been established.
+ */
+void gigaset_isdn_connB(struct bc_state *bcs)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_ctr *iif = cs->iif;
+	struct gigaset_capi_appl *ap = bcs->ap;
+	struct sk_buff *skb;
+	unsigned int msgsize;
+	u8 command;
+
+	if (!ap) {
+		dev_err(cs->dev, "%s: no application\n", __func__);
+		return;
+	}
+	while (ap->bcnext) {
+		/* this should never happen */
+		dev_warn(cs->dev, "%s: dropping extra application %u\n",
+			 __func__, ap->bcnext->id);
+		send_disconnect_ind(bcs, ap->bcnext,
+				    CapiCallGivenToOtherApplication);
+		ap->bcnext = ap->bcnext->bcnext;
+	}
+	if (!ap->connected) {
+		dev_warn(cs->dev, "%s: application %u not connected\n",
+			 __func__, ap->id);
+		return;
+	}
+
+	/*
+	 * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
+	 * otherwise we have to emit CONNECT_B3_IND first, and follow up with
+	 * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
+	 * Parameters in both cases always: NCCI = 1, NCPI empty
+	 */
+	if (ap->connected >= APCONN_ACTIVE) {
+		command = CAPI_CONNECT_B3_ACTIVE;
+		msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
+	} else {
+		command = CAPI_CONNECT_B3;
+		msgsize = CAPI_CONNECT_B3_IND_BASELEN;
+	}
+	capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
+			 ap->nextMessageNumber++,
+			 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
+	skb = alloc_skb(msgsize, GFP_ATOMIC);
+	if (!skb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
+	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+	ap->connected = APCONN_ACTIVE;
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/**
+ * gigaset_isdn_hupB() - signal B channel hangup
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module to notify the LL that the B channel connection has
+ * been shut down.
+ */
+void gigaset_isdn_hupB(struct bc_state *bcs)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_appl *ap = bcs->ap;
+
+	/* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
+
+	if (!ap) {
+		dev_err(cs->dev, "%s: no application\n", __func__);
+		return;
+	}
+
+	send_disconnect_b3_ind(bcs, ap);
+}
+
+/**
+ * gigaset_isdn_start() - signal device availability
+ * @cs:		device descriptor structure.
+ *
+ * Called by main module to notify the LL that the device is available for
+ * use.
+ */
+void gigaset_isdn_start(struct cardstate *cs)
+{
+	struct gigaset_capi_ctr *iif = cs->iif;
+
+	/* fill profile data: manufacturer name */
+	strcpy(iif->ctr.manu, "Siemens");
+	/* CAPI and device version */
+	iif->ctr.version.majorversion = 2;		/* CAPI 2.0 */
+	iif->ctr.version.minorversion = 0;
+	/* ToDo: check/assert cs->gotfwver? */
+	iif->ctr.version.majormanuversion = cs->fwver[0];
+	iif->ctr.version.minormanuversion = cs->fwver[1];
+	/* number of B channels supported */
+	iif->ctr.profile.nbchannel = cs->channels;
+	/* global options: internal controller, supplementary services */
+	iif->ctr.profile.goptions = 0x11;
+	/* B1 protocols: 64 kbit/s HDLC or transparent */
+	iif->ctr.profile.support1 =  0x03;
+	/* B2 protocols: transparent only */
+	/* ToDo: X.75 SLP ? */
+	iif->ctr.profile.support2 =  0x02;
+	/* B3 protocols: transparent only */
+	iif->ctr.profile.support3 =  0x01;
+	/* no serial number */
+	strcpy(iif->ctr.serial, "0");
+	capi_ctr_ready(&iif->ctr);
+}
+
+/**
+ * gigaset_isdn_stop() - signal device unavailability
+ * @cs:		device descriptor structure.
+ *
+ * Called by main module to notify the LL that the device is no longer
+ * available for use.
+ */
+void gigaset_isdn_stop(struct cardstate *cs)
+{
+	struct gigaset_capi_ctr *iif = cs->iif;
+	capi_ctr_down(&iif->ctr);
+}
+
+/*
+ * kernel CAPI callback methods
+ * ============================
+ */
+
+/*
+ * load firmware
+ */
+static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
+{
+	struct cardstate *cs = ctr->driverdata;
+
+	/* AVM specific operation, not needed for Gigaset -- ignore */
+	dev_notice(cs->dev, "load_firmware ignored\n");
+
+	return 0;
+}
+
+/*
+ * reset (deactivate) controller
+ */
+static void gigaset_reset_ctr(struct capi_ctr *ctr)
+{
+	struct cardstate *cs = ctr->driverdata;
+
+	/* AVM specific operation, not needed for Gigaset -- ignore */
+	dev_notice(cs->dev, "reset_ctr ignored\n");
+}
+
+/*
+ * register CAPI application
+ */
+static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
+			   capi_register_params *rp)
+{
+	struct gigaset_capi_ctr *iif
+		= container_of(ctr, struct gigaset_capi_ctr, ctr);
+	struct cardstate *cs = ctr->driverdata;
+	struct gigaset_capi_appl *ap;
+
+	list_for_each_entry(ap, &iif->appls, ctrlist)
+		if (ap->id == appl) {
+			dev_notice(cs->dev,
+				   "application %u already registered\n", appl);
+			return;
+		}
+
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+	if (!ap) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	ap->id = appl;
+
+	list_add(&ap->ctrlist, &iif->appls);
+}
+
+/*
+ * release CAPI application
+ */
+static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
+{
+	struct gigaset_capi_ctr *iif
+		= container_of(ctr, struct gigaset_capi_ctr, ctr);
+	struct cardstate *cs = iif->ctr.driverdata;
+	struct gigaset_capi_appl *ap, *tmp;
+
+	list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
+		if (ap->id == appl) {
+			if (ap->connected != APCONN_NONE) {
+				dev_err(cs->dev,
+					"%s: application %u still connected\n",
+					__func__, ap->id);
+				/* ToDo: clear active connection */
+			}
+			list_del(&ap->ctrlist);
+			kfree(ap);
+		}
+}
+
+/*
+ * =====================================================================
+ * outgoing CAPI message handler
+ * =====================================================================
+ */
+
+/*
+ * helper function: emit reply message with given Info value
+ */
+static void send_conf(struct gigaset_capi_ctr *iif,
+		      struct gigaset_capi_appl *ap,
+		      struct sk_buff *skb,
+		      u16 info)
+{
+	/*
+	 * _CONF replies always only have NCCI and Info parameters
+	 * so they'll fit into the _REQ message skb
+	 */
+	capi_cmsg_answer(&iif->acmsg);
+	iif->acmsg.Info = info;
+	capi_cmsg2message(&iif->acmsg, skb->data);
+	__skb_trim(skb, CAPI_STDCONF_LEN);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/*
+ * process FACILITY_REQ message
+ */
+static void do_facility_req(struct gigaset_capi_ctr *iif,
+			    struct gigaset_capi_appl *ap,
+			    struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	struct sk_buff *cskb;
+	u8 *pparam;
+	unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
+	u16 function, info;
+	static u8 confparam[10];	/* max. 9 octets + length byte */
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+	/*
+	 * Facility Request Parameter is not decoded by capi_message2cmsg()
+	 * encoding depends on Facility Selector
+	 */
+	switch (cmsg->FacilitySelector) {
+	case CAPI_FACILITY_DTMF:	/* ToDo */
+		info = CapiFacilityNotSupported;
+		confparam[0] = 2;	/* length */
+		/* DTMF information: Unknown DTMF request */
+		capimsg_setu16(confparam, 1, 2);
+		break;
+
+	case CAPI_FACILITY_V42BIS:	/* not supported */
+		info = CapiFacilityNotSupported;
+		confparam[0] = 2;	/* length */
+		/* V.42 bis information: not available */
+		capimsg_setu16(confparam, 1, 1);
+		break;
+
+	case CAPI_FACILITY_SUPPSVC:
+		/* decode Function parameter */
+		pparam = cmsg->FacilityRequestParameter;
+		if (pparam == NULL || *pparam < 2) {
+			dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
+				   "Facility Request Parameter");
+			send_conf(iif, ap, skb, CapiIllMessageParmCoding);
+			return;
+		}
+		function = CAPIMSG_U16(pparam, 1);
+		switch (function) {
+		case CAPI_SUPPSVC_GETSUPPORTED:
+			info = CapiSuccess;
+			/* Supplementary Service specific parameter */
+			confparam[3] = 6;	/* length */
+			/* Supplementary services info: Success */
+			capimsg_setu16(confparam, 4, CapiSuccess);
+			/* Supported Services: none */
+			capimsg_setu32(confparam, 6, 0);
+			break;
+		/* ToDo: add supported services */
+		default:
+			info = CapiFacilitySpecificFunctionNotSupported;
+			/* Supplementary Service specific parameter */
+			confparam[3] = 2;	/* length */
+			/* Supplementary services info: not supported */
+			capimsg_setu16(confparam, 4,
+				       CapiSupplementaryServiceNotSupported);
+		}
+
+		/* Facility confirmation parameter */
+		confparam[0] = confparam[3] + 3;	/* total length */
+		/* Function: copy from _REQ message */
+		capimsg_setu16(confparam, 1, function);
+		/* Supplementary Service specific parameter already set above */
+		break;
+
+	case CAPI_FACILITY_WAKEUP:	/* ToDo */
+		info = CapiFacilityNotSupported;
+		confparam[0] = 2;	/* length */
+		/* Number of accepted awake request parameters: 0 */
+		capimsg_setu16(confparam, 1, 0);
+		break;
+
+	default:
+		info = CapiFacilityNotSupported;
+		confparam[0] = 0;	/* empty struct */
+	}
+
+	/* send FACILITY_CONF with given Info and confirmation parameter */
+	capi_cmsg_answer(cmsg);
+	cmsg->Info = info;
+	cmsg->FacilityConfirmationParameter = confparam;
+	msgsize += confparam[0];	/* length */
+	cskb = alloc_skb(msgsize, GFP_ATOMIC);
+	if (!cskb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	capi_cmsg2message(cmsg, __skb_put(cskb, msgsize));
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+	capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
+}
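+
+/*
+ * Worked example of the confirmation parameter built above for
+ * CAPI_SUPPSVC_GETSUPPORTED (all values follow from the code; CapiSuccess
+ * is 0): confparam[] = { 9, <Function lo>, <Function hi>, 6, 0, 0, 0, 0, 0, 0 },
+ * i.e. total length 9, the echoed Function, and an inner struct of length 6
+ * carrying Info = CapiSuccess and an all-zero Supported Services bitmap.
+ */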
+
+
+/*
+ * process LISTEN_REQ message
+ * just store the masks in the application data structure
+ */
+static void do_listen_req(struct gigaset_capi_ctr *iif,
+			  struct gigaset_capi_appl *ap,
+			  struct sk_buff *skb)
+{
+	/* decode message */
+	capi_message2cmsg(&iif->acmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+
+	/* store listening parameters */
+	ap->listenInfoMask = iif->acmsg.InfoMask;
+	ap->listenCIPmask = iif->acmsg.CIPmask;
+	send_conf(iif, ap, skb, CapiSuccess);
+}
+
+/*
+ * process ALERT_REQ message
+ * nothing to do, Gigaset always alerts anyway
+ */
+static void do_alert_req(struct gigaset_capi_ctr *iif,
+			 struct gigaset_capi_appl *ap,
+			 struct sk_buff *skb)
+{
+	/* decode message */
+	capi_message2cmsg(&iif->acmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	send_conf(iif, ap, skb, CapiAlertAlreadySent);
+}
+
+/*
+ * process CONNECT_REQ message
+ * allocate a B channel, prepare dial commands, queue a DIAL event,
+ * emit CONNECT_CONF reply
+ */
+static void do_connect_req(struct gigaset_capi_ctr *iif,
+			   struct gigaset_capi_appl *ap,
+			   struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
+	char **commands;
+	char *s;
+	u8 *pp;
+	int i, l;
+	u16 info;
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+	/* get free B channel & construct PLCI */
+	bcs = gigaset_get_free_channel(cs);
+	if (!bcs) {
+		dev_notice(cs->dev, "%s: no B channel available\n",
+			   "CONNECT_REQ");
+		send_conf(iif, ap, skb, CapiNoPlciAvailable);
+		return;
+	}
+	ap->bcnext = NULL;
+	bcs->ap = ap;
+	cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
+
+	/* build command table */
+	commands = kzalloc(AT_NUM*(sizeof *commands), GFP_KERNEL);
+	if (!commands)
+		goto oom;
+
+	/* encode parameter: Called party number */
+	pp = cmsg->CalledPartyNumber;
+	if (pp == NULL || *pp == 0) {
+		dev_notice(cs->dev, "%s: %s missing\n",
+			   "CONNECT_REQ", "Called party number");
+		info = CapiIllMessageParmCoding;
+		goto error;
+	}
+	l = *pp++;
+	/* check type of number/numbering plan byte */
+	switch (*pp) {
+	case 0x80:	/* unknown type / unknown numbering plan */
+	case 0x81:	/* unknown type / ISDN/Telephony numbering plan */
+		break;
+	default:	/* others: warn about potential misinterpretation */
+		dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
+			   "CONNECT_REQ", "Called party number", *pp);
+	}
+	pp++;
+	l--;
+	/* translate "**" internal call prefix to CTP value */
+	if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
+		s = "^SCTP=0\r";
+		pp += 2;
+		l -= 2;
+	} else {
+		s = "^SCTP=1\r";
+	}
+	commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
+	if (!commands[AT_TYPE])
+		goto oom;
+	commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL);
+	if (!commands[AT_DIAL])
+		goto oom;
+	snprintf(commands[AT_DIAL], l+3, "D%.*s\r", l, pp);
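+	/*
+	 * worked example: a Called party number of "**2" yields the commands
+	 * "^SCTP=0\r" and "D2\r"; a number without the "**" prefix yields
+	 * "^SCTP=1\r" and "D<number>\r"
+	 */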
+
+	/* encode parameter: Calling party number */
+	pp = cmsg->CallingPartyNumber;
+	if (pp != NULL && *pp > 0) {
+		l = *pp++;
+
+		/* check type of number/numbering plan byte */
+		/* ToDo: allow for/handle Ext=1? */
+		switch (*pp) {
+		case 0x00:	/* unknown type / unknown numbering plan */
+		case 0x01:	/* unknown type / ISDN/Telephony num. plan */
+			break;
+		default:
+			dev_notice(cs->dev,
+				   "%s: %s type/plan 0x%02x unsupported\n",
+				   "CONNECT_REQ", "Calling party number", *pp);
+		}
+		pp++;
+		l--;
+
+		/* check presentation indicator */
+		if (!l) {
+			dev_notice(cs->dev, "%s: %s IE truncated\n",
+				   "CONNECT_REQ", "Calling party number");
+			info = CapiIllMessageParmCoding;
+			goto error;
+		}
+		switch (*pp & 0xfc) { /* ignore Screening indicator */
+		case 0x80:	/* Presentation allowed */
+			s = "^SCLIP=1\r";
+			break;
+		case 0xa0:	/* Presentation restricted */
+			s = "^SCLIP=0\r";
+			break;
+		default:
+			dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+				   "CONNECT_REQ",
+				   "Presentation/Screening indicator",
+				   *pp);
+			s = "^SCLIP=1\r";
+		}
+		commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
+		if (!commands[AT_CLIP])
+			goto oom;
+		pp++;
+		l--;
+
+		if (l) {
+			/* number */
+			commands[AT_MSN] = kmalloc(l+8, GFP_KERNEL);
+			if (!commands[AT_MSN])
+				goto oom;
+			/* pp is not NUL terminated: use precision, not width */
+			snprintf(commands[AT_MSN], l+8, "^SMSN=%.*s\r", l, pp);
+		}
+	}
+
+	/* check parameter: CIP Value */
+	if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
+	    (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
+		dev_notice(cs->dev, "%s: unknown CIP value %d\n",
+			   "CONNECT_REQ", cmsg->CIPValue);
+		info = CapiCipValueUnknown;
+		goto error;
+	}
+
+	/* check/encode parameter: BC */
+	if (cmsg->BC && cmsg->BC[0]) {
+		/* explicit BC overrides CIP */
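+		/* length: 2 output chars per IE octet + "^SBC=" + "\r" + NUL */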
+		l = 2*cmsg->BC[0] + 7;
+		commands[AT_BC] = kmalloc(l, GFP_KERNEL);
+		if (!commands[AT_BC])
+			goto oom;
+		strcpy(commands[AT_BC], "^SBC=");
+		decode_ie(cmsg->BC, commands[AT_BC]+5);
+		strcpy(commands[AT_BC] + l - 2, "\r");
+	} else if (cip2bchlc[cmsg->CIPValue].bc) {
+		l = strlen(cip2bchlc[cmsg->CIPValue].bc) + 7;
+		commands[AT_BC] = kmalloc(l, GFP_KERNEL);
+		if (!commands[AT_BC])
+			goto oom;
+		snprintf(commands[AT_BC], l, "^SBC=%s\r",
+			 cip2bchlc[cmsg->CIPValue].bc);
+	}
+
+	/* check/encode parameter: HLC */
+	if (cmsg->HLC && cmsg->HLC[0]) {
+		/* explicit HLC overrides CIP */
+		l = 2*cmsg->HLC[0] + 7;
+		commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
+		if (!commands[AT_HLC])
+			goto oom;
+		strcpy(commands[AT_HLC], "^SHLC=");
+		decode_ie(cmsg->HLC, commands[AT_HLC]+5);
+		strcpy(commands[AT_HLC] + l - 2, "\r");
+	} else if (cip2bchlc[cmsg->CIPValue].hlc) {
+		l = strlen(cip2bchlc[cmsg->CIPValue].hlc) + 7;
+		commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
+		if (!commands[AT_HLC])
+			goto oom;
+		snprintf(commands[AT_HLC], l, "^SHLC=%s\r",
+			 cip2bchlc[cmsg->CIPValue].hlc);
+	}
+
+	/* check/encode parameter: B Protocol */
+	if (cmsg->BProtocol == CAPI_DEFAULT) {
+		bcs->proto2 = L2_HDLC;
+		dev_warn(cs->dev,
+		    "B2 Protocol X.75 SLP unsupported, using Transparent\n");
+	} else {
+		switch (cmsg->B1protocol) {
+		case 0:
+			bcs->proto2 = L2_HDLC;
+			break;
+		case 1:
+			bcs->proto2 = L2_BITSYNC;
+			break;
+		default:
+			dev_warn(cs->dev,
+			    "B1 Protocol %u unsupported, using Transparent\n",
+				 cmsg->B1protocol);
+			bcs->proto2 = L2_BITSYNC;
+		}
+		if (cmsg->B2protocol != 1)
+			dev_warn(cs->dev,
+			    "B2 Protocol %u unsupported, using Transparent\n",
+				 cmsg->B2protocol);
+		if (cmsg->B3protocol != 0)
+			dev_warn(cs->dev,
+			    "B3 Protocol %u unsupported, using Transparent\n",
+				 cmsg->B3protocol);
+		ignore_cstruct_param(cs, cmsg->B1configuration,
+					"CONNECT_REQ", "B1 Configuration");
+		ignore_cstruct_param(cs, cmsg->B2configuration,
+					"CONNECT_REQ", "B2 Configuration");
+		ignore_cstruct_param(cs, cmsg->B3configuration,
+					"CONNECT_REQ", "B3 Configuration");
+	}
+	commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
+	if (!commands[AT_PROTO])
+		goto oom;
+	snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
+
+	/* ToDo: check/encode remaining parameters */
+	ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
+					"CONNECT_REQ", "Called pty subaddr");
+	ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
+					"CONNECT_REQ", "Calling pty subaddr");
+	ignore_cstruct_param(cs, cmsg->LLC,
+					"CONNECT_REQ", "LLC");
+	if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+		ignore_cstruct_param(cs, cmsg->BChannelinformation,
+					"CONNECT_REQ", "B Channel Information");
+		ignore_cstruct_param(cs, cmsg->Keypadfacility,
+					"CONNECT_REQ", "Keypad Facility");
+		ignore_cstruct_param(cs, cmsg->Useruserdata,
+					"CONNECT_REQ", "User-User Data");
+		ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+					"CONNECT_REQ", "Facility Data Array");
+	}
+
+	/* encode parameter: B channel to use */
+	commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
+	if (!commands[AT_ISO])
+		goto oom;
+	snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
+		 (unsigned) bcs->channel + 1);
+
+	/* queue & schedule EV_DIAL event */
+	if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
+			       bcs->at_state.seq_index, NULL))
+		goto oom;
+	gig_dbg(DEBUG_CMD, "scheduling DIAL");
+	gigaset_schedule_event(cs);
+	ap->connected = APCONN_SETUP;
+	send_conf(iif, ap, skb, CapiSuccess);
+	return;
+
+oom:
+	dev_err(cs->dev, "%s: out of memory\n", __func__);
+	info = CAPI_MSGOSRESOURCEERR;
+error:
+	if (commands)
+		for (i = 0; i < AT_NUM; i++)
+			kfree(commands[i]);
+	kfree(commands);
+	gigaset_free_channel(bcs);
+	send_conf(iif, ap, skb, info);
+}
+
+/*
+ * process CONNECT_RESP message
+ * checks protocol parameters and queues an ACCEPT or HUP event
+ */
+static void do_connect_resp(struct gigaset_capi_ctr *iif,
+			    struct gigaset_capi_appl *ap,
+			    struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
+	struct gigaset_capi_appl *oap;
+	int channel;
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+	dev_kfree_skb_any(skb);
+
+	/* extract and check channel number from PLCI */
+	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
+	if (!channel || channel > cs->channels) {
+		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+			   "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
+		return;
+	}
+	bcs = cs->bcs + channel - 1;
+
+	switch (cmsg->Reject) {
+	case 0:		/* Accept */
+		/* drop all competing applications, keep only this one */
+		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
+			if (oap != ap)
+				send_disconnect_ind(bcs, oap,
+					CapiCallGivenToOtherApplication);
+		ap->bcnext = NULL;
+		bcs->ap = ap;
+		bcs->chstate |= CHS_NOTIFY_LL;
+
+		/* check/encode B channel protocol */
+		if (cmsg->BProtocol == CAPI_DEFAULT) {
+			bcs->proto2 = L2_HDLC;
+			dev_warn(cs->dev,
+		"B2 Protocol X.75 SLP unsupported, using Transparent\n");
+		} else {
+			switch (cmsg->B1protocol) {
+			case 0:
+				bcs->proto2 = L2_HDLC;
+				break;
+			case 1:
+				bcs->proto2 = L2_BITSYNC;
+				break;
+			default:
+				dev_warn(cs->dev,
+			"B1 Protocol %u unsupported, using Transparent\n",
+					 cmsg->B1protocol);
+				bcs->proto2 = L2_BITSYNC;
+			}
+			if (cmsg->B2protocol != 1)
+				dev_warn(cs->dev,
+			"B2 Protocol %u unsupported, using Transparent\n",
+					 cmsg->B2protocol);
+			if (cmsg->B3protocol != 0)
+				dev_warn(cs->dev,
+			"B3 Protocol %u unsupported, using Transparent\n",
+					 cmsg->B3protocol);
+			ignore_cstruct_param(cs, cmsg->B1configuration,
+					"CONNECT_RESP", "B1 Configuration");
+			ignore_cstruct_param(cs, cmsg->B2configuration,
+					"CONNECT_RESP", "B2 Configuration");
+			ignore_cstruct_param(cs, cmsg->B3configuration,
+					"CONNECT_RESP", "B3 Configuration");
+		}
+
+		/* ToDo: check/encode remaining parameters */
+		ignore_cstruct_param(cs, cmsg->ConnectedNumber,
+					"CONNECT_RESP", "Connected Number");
+		ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
+					"CONNECT_RESP", "Connected Subaddress");
+		ignore_cstruct_param(cs, cmsg->LLC,
+					"CONNECT_RESP", "LLC");
+		if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+			ignore_cstruct_param(cs, cmsg->BChannelinformation,
+					"CONNECT_RESP", "BChannel Information");
+			ignore_cstruct_param(cs, cmsg->Keypadfacility,
+					"CONNECT_RESP", "Keypad Facility");
+			ignore_cstruct_param(cs, cmsg->Useruserdata,
+					"CONNECT_RESP", "User-User Data");
+			ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+					"CONNECT_RESP", "Facility Data Array");
+		}
+
+		/* Accept call */
+		if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
+				       EV_ACCEPT, NULL, 0, NULL))
+			return;
+		gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
+		gigaset_schedule_event(cs);
+		return;
+
+	case 1:			/* Ignore */
+		/* send DISCONNECT_IND to this application */
+		send_disconnect_ind(bcs, ap, 0);
+
+		/* remove it from the list of listening apps */
+		if (bcs->ap == ap) {
+			bcs->ap = ap->bcnext;
+			if (bcs->ap == NULL)
+				/* last one: stop ev-layer hupD notifications */
+				bcs->chstate &= ~CHS_NOTIFY_LL;
+			return;
+		}
+		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
+			if (oap->bcnext == ap) {
+				oap->bcnext = oap->bcnext->bcnext;
+				return;
+			}
+		}
+		dev_err(cs->dev, "%s: application %u not found\n",
+			__func__, ap->id);
+		return;
+
+	default:		/* Reject */
+		/* drop all competing applications, keep only this one */
+		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
+			if (oap != ap)
+				send_disconnect_ind(bcs, oap,
+					CapiCallGivenToOtherApplication);
+		ap->bcnext = NULL;
+		bcs->ap = ap;
+
+		/* reject call - will trigger DISCONNECT_IND for this app */
+		dev_info(cs->dev, "%s: Reject=%x\n",
+			 "CONNECT_RESP", cmsg->Reject);
+		if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
+				       EV_HUP, NULL, 0, NULL))
+			return;
+		gig_dbg(DEBUG_CMD, "scheduling HUP");
+		gigaset_schedule_event(cs);
+		return;
+	}
+}
+
+/*
+ * process CONNECT_B3_REQ message
+ * build NCCI and emit CONNECT_B3_CONF reply
+ */
+static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
+			      struct gigaset_capi_appl *ap,
+			      struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	int channel;
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+	/* extract and check channel number from PLCI */
+	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
+	if (!channel || channel > cs->channels) {
+		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+			   "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
+		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+		return;
+	}
+
+	/* mark logical connection active */
+	ap->connected = APCONN_ACTIVE;
+
+	/* build NCCI: always 1 (one B3 connection only) */
+	cmsg->adr.adrNCCI |= 1 << 16;
+
+	/* NCPI parameter: not applicable for B3 Transparent */
+	ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
+	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+				CapiNcpiNotSupportedByProtocol : CapiSuccess);
+}
+
+/*
+ * process CONNECT_B3_RESP message
+ * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
+ * or queue EV_HUP and emit DISCONNECT_B3_IND.
+ * The emitted message is always shorter than the received one,
+ * so the skb can be reused.
+ */
+static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
+			       struct gigaset_capi_appl *ap,
+			       struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
+	int channel;
+	unsigned int msgsize;
+	u8 command;
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+	/* extract and check channel number and NCCI */
+	channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
+	if (!channel || channel > cs->channels ||
+	    ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
+		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+			   "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	bcs = &cs->bcs[channel-1];
+
+	if (cmsg->Reject) {
+		/* Reject: clear B3 connect received flag */
+		ap->connected = APCONN_SETUP;
+
+		/* trigger hangup, causing eventual DISCONNECT_IND */
+		if (!gigaset_add_event(cs, &bcs->at_state,
+				       EV_HUP, NULL, 0, NULL)) {
+			dev_err(cs->dev, "%s: out of memory\n", __func__);
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		gig_dbg(DEBUG_CMD, "scheduling HUP");
+		gigaset_schedule_event(cs);
+
+		/* emit DISCONNECT_B3_IND */
+		command = CAPI_DISCONNECT_B3;
+		msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
+	} else {
+		/*
+		 * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
+		 * we only send CONNECT_B3_IND if the B channel is up
+		 */
+		command = CAPI_CONNECT_B3_ACTIVE;
+		msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
+	}
+	capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
+			 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
+	__skb_trim(skb, msgsize);
+	capi_cmsg2message(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/*
+ * process DISCONNECT_REQ message
+ * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
+ * emit DISCONNECT_CONF reply
+ */
+static void do_disconnect_req(struct gigaset_capi_ctr *iif,
+			      struct gigaset_capi_appl *ap,
+			      struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
+	_cmsg *b3cmsg;
+	struct sk_buff *b3skb;
+	int channel;
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+	/* extract and check channel number from PLCI */
+	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
+	if (!channel || channel > cs->channels) {
+		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+			   "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
+		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+		return;
+	}
+	bcs = cs->bcs + channel - 1;
+
+	/* ToDo: process parameter: Additional info */
+	if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+		ignore_cstruct_param(cs, cmsg->BChannelinformation,
+				     "DISCONNECT_REQ", "B Channel Information");
+		ignore_cstruct_param(cs, cmsg->Keypadfacility,
+				     "DISCONNECT_REQ", "Keypad Facility");
+		ignore_cstruct_param(cs, cmsg->Useruserdata,
+				     "DISCONNECT_REQ", "User-User Data");
+		ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+				     "DISCONNECT_REQ", "Facility Data Array");
+	}
+
+	/* skip if DISCONNECT_IND already sent */
+	if (!ap->connected)
+		return;
+
+	/* check for active logical connection */
+	if (ap->connected >= APCONN_ACTIVE) {
+		/*
+		 * emit DISCONNECT_B3_IND with cause 0x3301
+		 * use separate cmsg structure, as the content of iif->acmsg
+		 * is still needed for creating the _CONF message
+		 */
+		b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
+		if (!b3cmsg) {
+			dev_err(cs->dev, "%s: out of memory\n", __func__);
+			send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+			return;
+		}
+		capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
+				 ap->nextMessageNumber++,
+				 cmsg->adr.adrPLCI | (1 << 16));
+		b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
+		b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
+		if (b3skb == NULL) {
+			dev_err(cs->dev, "%s: out of memory\n", __func__);
+			send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+			kfree(b3cmsg);
+			return;
+		}
+		capi_cmsg2message(b3cmsg,
+			__skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
+		kfree(b3cmsg);
+		capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
+	}
+
+	/* trigger hangup, causing eventual DISCONNECT_IND */
+	if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+		return;
+	}
+	gig_dbg(DEBUG_CMD, "scheduling HUP");
+	gigaset_schedule_event(cs);
+
+	/* emit reply */
+	send_conf(iif, ap, skb, CapiSuccess);
+}
+
+/*
+ * process DISCONNECT_B3_REQ message
+ * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
+ */
+static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
+				 struct gigaset_capi_appl *ap,
+				 struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
+	int channel;
+
+	/* decode message */
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+	/* extract and check channel number and NCCI */
+	channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
+	if (!channel || channel > cs->channels ||
+	    ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
+		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+			   "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
+		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+		return;
+	}
+
+	/* reject if logical connection not active */
+	if (ap->connected < APCONN_ACTIVE) {
+		send_conf(iif, ap, skb,
+			  CapiMessageNotSupportedInCurrentState);
+		return;
+	}
+
+	/* trigger hangup, causing eventual DISCONNECT_B3_IND */
+	if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
+			       EV_HUP, NULL, 0, NULL)) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+		return;
+	}
+	gig_dbg(DEBUG_CMD, "scheduling HUP");
+	gigaset_schedule_event(cs);
+
+	/* NCPI parameter: not applicable for B3 Transparent */
+	ignore_cstruct_param(cs, cmsg->NCPI,
+				"DISCONNECT_B3_REQ", "NCPI");
+	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+				CapiNcpiNotSupportedByProtocol : CapiSuccess);
+}
+
+/*
+ * process DATA_B3_REQ message
+ */
+static void do_data_b3_req(struct gigaset_capi_ctr *iif,
+			   struct gigaset_capi_appl *ap,
+			   struct sk_buff *skb)
+{
+	struct cardstate *cs = iif->ctr.driverdata;
+	int channel = CAPIMSG_PLCI_PART(skb->data);
+	u16 ncci = CAPIMSG_NCCI_PART(skb->data);
+	u16 msglen = CAPIMSG_LEN(skb->data);
+	u16 datalen = CAPIMSG_DATALEN(skb->data);
+	u16 flags = CAPIMSG_FLAGS(skb->data);
+
+	/* frequent message, avoid _cmsg overhead */
+	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_REQ", skb->data);
+
+	gig_dbg(DEBUG_LLDATA,
+		"Receiving data from LL (ch: %d, flg: %x, sz: %d|%d)",
+		channel, flags, msglen, datalen);
+
+	/* check parameters */
+	if (channel == 0 || channel > cs->channels || ncci != 1) {
+		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+			   "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
+		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+		return;
+	}
+	if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
+		dev_notice(cs->dev, "%s: unexpected length %d\n",
+			   "DATA_B3_REQ", msglen);
+	if (msglen + datalen != skb->len)
+		dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
+			   "DATA_B3_REQ", msglen, datalen, skb->len);
+	if (msglen + datalen > skb->len) {
+		/* message too short for announced data length */
+		send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
+		return;
+	}
+	if (flags & CAPI_FLAGS_RESERVED) {
+		dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
+			   "DATA_B3_REQ", flags);
+		send_conf(iif, ap, skb, CapiIllMessageParmCoding);
+		return;
+	}
+
+	/* reject if logical connection not active */
+	if (ap->connected < APCONN_ACTIVE) {
+		send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
+		return;
+	}
+
+	/* pull CAPI message into link layer header */
+	skb_reset_mac_header(skb);
+	skb->mac_len = msglen;
+	skb_pull(skb, msglen);
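+	/*
+	 * The CAPI header now precedes skb->data as a pseudo hardware header;
+	 * presumably gigaset_skb_sent() locates it again via mac_header and
+	 * mac_len when building the DATA_B3_CONF reply mentioned below.
+	 */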
+
+	/* pass to device-specific module */
+	if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
+		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+		return;
+	}
+
+	/* DATA_B3_CONF reply will be sent by gigaset_skb_sent() */
+
+	/*
+	 * ToDo: honor unset "delivery confirmation" bit
+	 * (send DATA_B3_CONF immediately?)
+	 */
+}
+
+/*
+ * process RESET_B3_REQ message
+ * just always reply "not supported by current protocol"
+ */
+static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
+			    struct gigaset_capi_appl *ap,
+			    struct sk_buff *skb)
+{
+	/* decode message */
+	capi_message2cmsg(&iif->acmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	send_conf(iif, ap, skb,
+		  CapiResetProcedureNotSupportedByCurrentProtocol);
+}
+
+/*
+ * dump unsupported/ignored messages at most twice per minute,
+ * some apps send those very frequently
+ */
+static unsigned long ignored_msg_dump_time;
+
+/*
+ * unsupported CAPI message handler
+ */
+static void do_unsupported(struct gigaset_capi_ctr *iif,
+			   struct gigaset_capi_appl *ap,
+			   struct sk_buff *skb)
+{
+	/* decode message */
+	capi_message2cmsg(&iif->acmsg, skb->data);
+	if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
+		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
+}
+
+/*
+ * CAPI message handler: no-op
+ */
+static void do_nothing(struct gigaset_capi_ctr *iif,
+		       struct gigaset_capi_appl *ap,
+		       struct sk_buff *skb)
+{
+	if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
+		/* decode message */
+		capi_message2cmsg(&iif->acmsg, skb->data);
+		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	}
+	dev_kfree_skb_any(skb);
+}
+
+static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
+			    struct gigaset_capi_appl *ap,
+			    struct sk_buff *skb)
+{
+	dump_rawmsg(DEBUG_LLDATA, __func__, skb->data);
+	dev_kfree_skb_any(skb);
+}
+
+/* table of outgoing CAPI message handlers with lookup function */
+typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
+				    struct gigaset_capi_appl *,
+				    struct sk_buff *);
+
+static struct {
+	u16 cmd;
+	capi_send_handler_t handler;
+} capi_send_handler_table[] = {
+	/* most frequent messages first for faster lookup */
+	{ CAPI_DATA_B3_REQ, do_data_b3_req },
+	{ CAPI_DATA_B3_RESP, do_data_b3_resp },
+
+	{ CAPI_ALERT_REQ, do_alert_req },
+	{ CAPI_CONNECT_ACTIVE_RESP, do_nothing },
+	{ CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
+	{ CAPI_CONNECT_B3_REQ, do_connect_b3_req },
+	{ CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
+	{ CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
+	{ CAPI_CONNECT_REQ, do_connect_req },
+	{ CAPI_CONNECT_RESP, do_connect_resp },
+	{ CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
+	{ CAPI_DISCONNECT_B3_RESP, do_nothing },
+	{ CAPI_DISCONNECT_REQ, do_disconnect_req },
+	{ CAPI_DISCONNECT_RESP, do_nothing },
+	{ CAPI_FACILITY_REQ, do_facility_req },
+	{ CAPI_FACILITY_RESP, do_nothing },
+	{ CAPI_LISTEN_REQ, do_listen_req },
+	{ CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
+	{ CAPI_RESET_B3_REQ, do_reset_b3_req },
+	{ CAPI_RESET_B3_RESP, do_nothing },
+
+	/*
+	 * ToDo: support overlap sending (requires ev-layer state
+	 * machine extension to generate additional ATD commands)
+	 */
+	{ CAPI_INFO_REQ, do_unsupported },
+	{ CAPI_INFO_RESP, do_nothing },
+
+	/*
+	 * ToDo: what's the proper response for these?
+	 */
+	{ CAPI_MANUFACTURER_REQ, do_nothing },
+	{ CAPI_MANUFACTURER_RESP, do_nothing },
+};
+
+/* look up handler */
+static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
+		if (capi_send_handler_table[i].cmd == cmd)
+			return capi_send_handler_table[i].handler;
+	return NULL;
+}
+
+
+/**
+ * gigaset_send_message() - accept a CAPI message from an application
+ * @ctr:	controller descriptor structure.
+ * @skb:	CAPI message.
+ *
+ * Return value: CAPI error code
+ * Note: capidrv (and probably other applications, too) uses the return value
+ * only to decide whether it has to free the skb, which it does only if the
+ * result is != CAPI_NOERROR (0).
+ */
+static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
+{
+	struct gigaset_capi_ctr *iif
+		= container_of(ctr, struct gigaset_capi_ctr, ctr);
+	struct cardstate *cs = ctr->driverdata;
+	struct gigaset_capi_appl *ap;
+	capi_send_handler_t handler;
+
+	/* can only handle linear sk_buffs */
+	if (skb_linearize(skb) < 0) {
+		dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
+		return CAPI_MSGOSRESOURCEERR;
+	}
+
+	/* retrieve application data structure */
+	ap = get_appl(iif, CAPIMSG_APPID(skb->data));
+	if (!ap) {
+		dev_notice(cs->dev, "%s: application %u not registered\n",
+			   __func__, CAPIMSG_APPID(skb->data));
+		return CAPI_ILLAPPNR;
+	}
+
+	/* look up command */
+	handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
+	if (!handler) {
+		/* unknown/unsupported message type */
+		if (printk_ratelimit())
+			dev_notice(cs->dev, "%s: unsupported message %u\n",
+				   __func__, CAPIMSG_CMD(skb->data));
+		return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
+	}
+
+	/* serialize */
+	if (atomic_add_return(1, &iif->sendqlen) > 1) {
+		/* queue behind other messages */
+		skb_queue_tail(&iif->sendqueue, skb);
+		return CAPI_NOERROR;
+	}
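+	/*
+	 * We took sendqlen from 0 to 1, so no other sender is currently
+	 * processing: handle our own message, then drain whatever other
+	 * senders queued in the meantime.  This keeps the handlers from
+	 * running concurrently.
+	 */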
+
+	/* process message */
+	handler(iif, ap, skb);
+
+	/* process other messages arrived in the meantime */
+	while (atomic_sub_return(1, &iif->sendqlen) > 0) {
+		skb = skb_dequeue(&iif->sendqueue);
+		if (!skb) {
+			/* should never happen */
+			dev_err(cs->dev, "%s: send queue empty\n", __func__);
+			continue;
+		}
+		ap = get_appl(iif, CAPIMSG_APPID(skb->data));
+		if (!ap) {
+			/* could that happen? */
+			dev_warn(cs->dev, "%s: application %u vanished\n",
+				 __func__, CAPIMSG_APPID(skb->data));
+			continue;
+		}
+		handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
+		if (!handler) {
+			/* should never happen */
+			dev_err(cs->dev, "%s: handler %x vanished\n",
+				__func__, CAPIMSG_CMD(skb->data));
+			continue;
+		}
+		handler(iif, ap, skb);
+	}
+
+	return CAPI_NOERROR;
+}
+
+/**
+ * gigaset_procinfo() - build single line description for controller
+ * @ctr:	controller descriptor structure.
+ *
+ * Return value: pointer to generated string (null terminated)
+ */
+static char *gigaset_procinfo(struct capi_ctr *ctr)
+{
+	return ctr->name;	/* ToDo: more? */
+}
+
+/**
+ * gigaset_ctr_read_proc() - build controller proc file entry
+ * @page:	buffer of PAGE_SIZE bytes for receiving the entry.
+ * @start:	unused.
+ * @off:	unused.
+ * @count:	unused.
+ * @eof:	unused.
+ * @ctr:	controller descriptor structure.
+ *
+ * Return value: length of generated entry
+ */
+static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
+			  int count, int *eof, struct capi_ctr *ctr)
+{
+	struct cardstate *cs = ctr->driverdata;
+	char *s;
+	int i;
+	int len = 0;
+	len += sprintf(page+len, "%-16s %s\n", "name", ctr->name);
+	len += sprintf(page+len, "%-16s %s %s\n", "dev",
+			dev_driver_string(cs->dev), dev_name(cs->dev));
+	len += sprintf(page+len, "%-16s %d\n", "id", cs->myid);
+	if (cs->gotfwver)
+		len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware",
+			cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
+	len += sprintf(page+len, "%-16s %d\n", "channels",
+			cs->channels);
+	len += sprintf(page+len, "%-16s %s\n", "onechannel",
+			cs->onechannel ? "yes" : "no");
+
+	switch (cs->mode) {
+	case M_UNKNOWN:
+		s = "unknown";
+		break;
+	case M_CONFIG:
+		s = "config";
+		break;
+	case M_UNIMODEM:
+		s = "Unimodem";
+		break;
+	case M_CID:
+		s = "CID";
+		break;
+	default:
+		s = "??";
+	}
+	len += sprintf(page+len, "%-16s %s\n", "mode", s);
+
+	switch (cs->mstate) {
+	case MS_UNINITIALIZED:
+		s = "uninitialized";
+		break;
+	case MS_INIT:
+		s = "init";
+		break;
+	case MS_LOCKED:
+		s = "locked";
+		break;
+	case MS_SHUTDOWN:
+		s = "shutdown";
+		break;
+	case MS_RECOVER:
+		s = "recover";
+		break;
+	case MS_READY:
+		s = "ready";
+		break;
+	default:
+		s = "??";
+	}
+	len += sprintf(page+len, "%-16s %s\n", "mstate", s);
+
+	len += sprintf(page+len, "%-16s %s\n", "running",
+			cs->running ? "yes" : "no");
+	len += sprintf(page+len, "%-16s %s\n", "connected",
+			cs->connected ? "yes" : "no");
+	len += sprintf(page+len, "%-16s %s\n", "isdn_up",
+			cs->isdn_up ? "yes" : "no");
+	len += sprintf(page+len, "%-16s %s\n", "cidmode",
+			cs->cidmode ? "yes" : "no");
+
+	for (i = 0; i < cs->channels; i++) {
+		len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted",
+				cs->bcs[i].corrupted);
+		len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down",
+				cs->bcs[i].trans_down);
+		len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up",
+				cs->bcs[i].trans_up);
+		len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate",
+				cs->bcs[i].chstate);
+		switch (cs->bcs[i].proto2) {
+		case L2_BITSYNC:
+			s = "bitsync";
+			break;
+		case L2_HDLC:
+			s = "HDLC";
+			break;
+		case L2_VOICE:
+			s = "voice";
+			break;
+		default:
+			s = "??";
+		}
+		len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s);
+	}
+	return len;
+}
+
+
+static struct capi_driver capi_driver_gigaset = {
+	.name		= "gigaset",
+	.revision	= "1.0",
+};
+
+/**
+ * gigaset_isdn_register() - register to LL
+ * @cs:		device descriptor structure.
+ * @isdnid:	device name.
+ *
+ * Called by main module to register the device with the LL.
+ *
+ * Return value: 1 for success, 0 for failure
+ */
+int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
+{
+	struct gigaset_capi_ctr *iif;
+	int rc;
+
+	pr_info("Kernel CAPI interface\n");
+
+	iif = kmalloc(sizeof(*iif), GFP_KERNEL);
+	if (!iif) {
+		pr_err("%s: out of memory\n", __func__);
+		return 0;
+	}
+
+	/* register driver with CAPI (ToDo: what for?) */
+	register_capi_driver(&capi_driver_gigaset);
+
+	/* prepare controller structure */
+	iif->ctr.owner         = THIS_MODULE;
+	iif->ctr.driverdata    = cs;
+	strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
+	iif->ctr.driver_name   = "gigaset";
+	iif->ctr.load_firmware = gigaset_load_firmware;
+	iif->ctr.reset_ctr     = gigaset_reset_ctr;
+	iif->ctr.register_appl = gigaset_register_appl;
+	iif->ctr.release_appl  = gigaset_release_appl;
+	iif->ctr.send_message  = gigaset_send_message;
+	iif->ctr.procinfo      = gigaset_procinfo;
+	iif->ctr.ctr_read_proc = gigaset_ctr_read_proc;
+	INIT_LIST_HEAD(&iif->appls);
+	skb_queue_head_init(&iif->sendqueue);
+	atomic_set(&iif->sendqlen, 0);
+
+	/* register controller with CAPI */
+	rc = attach_capi_ctr(&iif->ctr);
+	if (rc) {
+		pr_err("attach_capi_ctr failed (%d)\n", rc);
+		unregister_capi_driver(&capi_driver_gigaset);
+		kfree(iif);
+		return 0;
+	}
+
+	cs->iif = iif;
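+	/*
+	 * reserve DATA_B3_REQ header size as hardware header space in data
+	 * skbs, presumably so CAPI data messages can be built around the
+	 * payload in place
+	 */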
+	cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
+	return 1;
+}
+
+/**
+ * gigaset_isdn_unregister() - unregister from LL
+ * @cs:		device descriptor structure.
+ *
+ * Called by main module to unregister the device from the LL.
+ */
+void gigaset_isdn_unregister(struct cardstate *cs)
+{
+	struct gigaset_capi_ctr *iif = cs->iif;
+
+	detach_capi_ctr(&iif->ctr);
+	kfree(iif);
+	cs->iif = NULL;
+	unregister_capi_driver(&capi_driver_gigaset);
+}
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 33dcd8d72b7c..82ed1cd14ff5 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -108,7 +108,7 @@ int gigaset_enterconfigmode(struct cardstate *cs)
 {
 	int i, r;
 
-	cs->control_state = TIOCM_RTS; //FIXME
+	cs->control_state = TIOCM_RTS;
 
 	r = setflags(cs, TIOCM_DTR, 200);
 	if (r < 0)
@@ -132,10 +132,10 @@ int gigaset_enterconfigmode(struct cardstate *cs)
 
 error:
 	dev_err(cs->dev, "error %d on setuartbits\n", -r);
-	cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value?
+	cs->control_state = TIOCM_RTS|TIOCM_DTR;
 	cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
 
-	return -1; //r
+	return -1;
 }
 
 static int test_timeout(struct at_state_t *at_state)
@@ -150,10 +150,9 @@ static int test_timeout(struct at_state_t *at_state)
 	}
 
 	if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
-			       at_state->timer_index, NULL)) {
-		//FIXME what should we do?
-	}
-
+			       at_state->timer_index, NULL))
+			dev_err(at_state->cs->dev, "%s: out of memory\n",
+				__func__);
 	return 1;
 }
 
@@ -207,6 +206,32 @@ int gigaset_get_channel(struct bc_state *bcs)
 	return 1;
 }
 
+struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&cs->lock, flags);
+	if (!try_module_get(cs->driver->owner)) {
+		gig_dbg(DEBUG_ANY,
+			"could not get module for allocating channel");
+		spin_unlock_irqrestore(&cs->lock, flags);
+		return NULL;
+	}
+	for (i = 0; i < cs->channels; ++i)
+		if (!cs->bcs[i].use_count) {
+			++cs->bcs[i].use_count;
+			cs->bcs[i].busy = 1;
+			spin_unlock_irqrestore(&cs->lock, flags);
+			gig_dbg(DEBUG_ANY, "allocated channel %d", i);
+			return cs->bcs + i;
+		}
+	module_put(cs->driver->owner);
+	spin_unlock_irqrestore(&cs->lock, flags);
+	gig_dbg(DEBUG_ANY, "no free channel");
+	return NULL;
+}
+
 void gigaset_free_channel(struct bc_state *bcs)
 {
 	unsigned long flags;
@@ -367,16 +392,15 @@ static void gigaset_freebcs(struct bc_state *bcs)
 	int i;
 
 	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
-	if (!bcs->cs->ops->freebcshw(bcs)) {
+	if (!bcs->cs->ops->freebcshw(bcs))
 		gig_dbg(DEBUG_INIT, "failed");
-	}
 
 	gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
 	clear_at_state(&bcs->at_state);
 	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
+	dev_kfree_skb(bcs->skb);
+	bcs->skb = NULL;
 
-	if (bcs->skb)
-		dev_kfree_skb(bcs->skb);
 	for (i = 0; i < AT_NUM; ++i) {
 		kfree(bcs->commands[i]);
 		bcs->commands[i] = NULL;
@@ -463,6 +487,12 @@ void gigaset_freecs(struct cardstate *cs)
 
 	switch (cs->cs_init) {
 	default:
+		/* clear B channel structures */
+		for (i = 0; i < cs->channels; ++i) {
+			gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
+			gigaset_freebcs(cs->bcs + i);
+		}
+
 		/* clear device sysfs */
 		gigaset_free_dev_sysfs(cs);
 
@@ -471,28 +501,20 @@ void gigaset_freecs(struct cardstate *cs)
 		gig_dbg(DEBUG_INIT, "clearing hw");
 		cs->ops->freecshw(cs);
 
-		//FIXME cmdbuf
-
 		/* fall through */
 	case 2: /* error in initcshw */
 		/* Deregister from LL */
 		make_invalid(cs, VALID_ID);
-		gig_dbg(DEBUG_INIT, "clearing iif");
-		gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
+		gigaset_isdn_unregister(cs);
 
 		/* fall through */
-	case 1: /* error when regestering to LL */
+	case 1: /* error when registering to LL */
 		gig_dbg(DEBUG_INIT, "clearing at_state");
 		clear_at_state(&cs->at_state);
 		dealloc_at_states(cs);
 
 		/* fall through */
-	case 0: /* error in one call to initbcs */
-		for (i = 0; i < cs->channels; ++i) {
-			gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
-			gigaset_freebcs(cs->bcs + i);
-		}
-
+	case 0:	/* error in basic setup */
 		clear_events(cs);
 		gig_dbg(DEBUG_INIT, "freeing inbuf");
 		kfree(cs->inbuf);
@@ -534,16 +556,13 @@ void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
 }
 
 
-static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
-			       struct cardstate *cs, int inputstate)
+static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
 /* inbuf->read must be allocated before! */
 {
 	inbuf->head = 0;
 	inbuf->tail = 0;
 	inbuf->cs = cs;
-	inbuf->bcs = bcs; /*base driver: NULL*/
-	inbuf->rcvbuf = NULL;
-	inbuf->inputstate = inputstate;
+	inbuf->inputstate = INS_command;
 }
 
 /**
@@ -599,7 +618,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
 {
 	int i;
 
-	bcs->tx_skb = NULL; //FIXME -> hw part
+	bcs->tx_skb = NULL;
 
 	skb_queue_head_init(&bcs->squeue);
 
@@ -618,13 +637,13 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
 	bcs->fcs = PPP_INITFCS;
 	bcs->inputstate = 0;
 	if (cs->ignoreframes) {
-		bcs->inputstate |= INS_skip_frame;
 		bcs->skb = NULL;
-	} else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
-		skb_reserve(bcs->skb, HW_HDR_LEN);
-	else {
-		pr_err("out of memory\n");
-		bcs->inputstate |= INS_skip_frame;
+	} else {
+		bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
+		if (bcs->skb != NULL)
+			skb_reserve(bcs->skb, cs->hw_hdr_len);
+		else
+			pr_err("out of memory\n");
 	}
 
 	bcs->channel = channel;
@@ -645,8 +664,8 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
 	gig_dbg(DEBUG_INIT, "  failed");
 
 	gig_dbg(DEBUG_INIT, "  freeing bcs[%d]->skb", channel);
-	if (bcs->skb)
-		dev_kfree_skb(bcs->skb);
+	dev_kfree_skb(bcs->skb);
+	bcs->skb = NULL;
 
 	return NULL;
 }
@@ -673,12 +692,13 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 				 int onechannel, int ignoreframes,
 				 int cidmode, const char *modulename)
 {
-	struct cardstate *cs = NULL;
+	struct cardstate *cs;
 	unsigned long flags;
 	int i;
 
 	gig_dbg(DEBUG_INIT, "allocating cs");
-	if (!(cs = alloc_cs(drv))) {
+	cs = alloc_cs(drv);
+	if (!cs) {
 		pr_err("maximum number of devices exceeded\n");
 		return NULL;
 	}
@@ -707,7 +727,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	cs->ev_tail = 0;
 	cs->ev_head = 0;
 
-	tasklet_init(&cs->event_tasklet, &gigaset_handle_event,
+	tasklet_init(&cs->event_tasklet, gigaset_handle_event,
 		     (unsigned long) cs);
 	cs->commands_pending = 0;
 	cs->cur_at_seq = 0;
@@ -726,14 +746,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	cs->mode = M_UNKNOWN;
 	cs->mstate = MS_UNINITIALIZED;
 
-	for (i = 0; i < channels; ++i) {
-		gig_dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
-		if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
-			pr_err("could not allocate channel %d data\n", i);
-			goto error;
-		}
-	}
-
 	++cs->cs_init;
 
 	gig_dbg(DEBUG_INIT, "setting up at_state");
@@ -743,10 +755,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	cs->cbytes = 0;
 
 	gig_dbg(DEBUG_INIT, "setting up inbuf");
-	if (onechannel) {			//FIXME distinction necessary?
-		gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
-	} else
-		gigaset_inbuf_init(cs->inbuf, NULL,    cs, INS_command);
+	gigaset_inbuf_init(cs->inbuf, cs);
 
 	cs->connected = 0;
 	cs->isdn_up = 0;
@@ -758,7 +767,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	cs->cmdbytes = 0;
 
 	gig_dbg(DEBUG_INIT, "setting up iif");
-	if (!gigaset_register_to_LL(cs, modulename)) {
+	if (!gigaset_isdn_register(cs, modulename)) {
 		pr_err("error registering ISDN device\n");
 		goto error;
 	}
@@ -777,6 +786,15 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	/* set up device sysfs */
 	gigaset_init_dev_sysfs(cs);
 
+	/* set up channel data structures */
+	for (i = 0; i < channels; ++i) {
+		gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
+		if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
+			pr_err("could not allocate channel %d data\n", i);
+			goto error;
+		}
+	}
+
 	spin_lock_irqsave(&cs->lock, flags);
 	cs->running = 1;
 	spin_unlock_irqrestore(&cs->lock, flags);
@@ -824,9 +842,10 @@ void gigaset_bcs_reinit(struct bc_state *bcs)
 	bcs->chstate = 0;
 
 	bcs->ignore = cs->ignoreframes;
-	if (bcs->ignore)
-		bcs->inputstate |= INS_skip_frame;
-
+	if (bcs->ignore) {
+		dev_kfree_skb(bcs->skb);
+		bcs->skb = NULL;
+	}
 
 	cs->ops->reinitbcshw(bcs);
 }
@@ -847,8 +866,6 @@ static void cleanup_cs(struct cardstate *cs)
 	free_strings(&cs->at_state);
 	gigaset_at_init(&cs->at_state, NULL, cs, 0);
 
-	kfree(cs->inbuf->rcvbuf);
-	cs->inbuf->rcvbuf = NULL;
 	cs->inbuf->inputstate = INS_command;
 	cs->inbuf->head = 0;
 	cs->inbuf->tail = 0;
@@ -911,15 +928,13 @@ int gigaset_start(struct cardstate *cs)
 		cs->ops->baud_rate(cs, B115200);
 		cs->ops->set_line_ctrl(cs, CS8);
 		cs->control_state = TIOCM_DTR|TIOCM_RTS;
-	} else {
-		//FIXME use some saved values?
 	}
 
 	cs->waiting = 1;
 
 	if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
 		cs->waiting = 0;
-		//FIXME what should we do?
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		goto error;
 	}
 
@@ -959,7 +974,7 @@ int gigaset_shutdown(struct cardstate *cs)
 	cs->waiting = 1;
 
 	if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
-		//FIXME what should we do?
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		goto exit;
 	}
 
@@ -990,7 +1005,7 @@ void gigaset_stop(struct cardstate *cs)
 	cs->waiting = 1;
 
 	if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
-		//FIXME what should we do?
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		goto exit;
 	}
 
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
new file mode 100644
index 000000000000..5b27c996af6d
--- /dev/null
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -0,0 +1,68 @@
+/*
+ * Dummy LL interface for the Gigaset driver
+ *
+ * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License as
+ *	published by the Free Software Foundation; either version 2 of
+ *	the License, or (at your option) any later version.
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_sent);
+
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
+
+void gigaset_isdn_rcv_err(struct bc_state *bcs)
+{
+}
+EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
+
+int gigaset_isdn_icall(struct at_state_t *at_state)
+{
+	return ICALL_IGNORE;
+}
+
+void gigaset_isdn_connD(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_hupD(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_connB(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_hupB(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_start(struct cardstate *cs)
+{
+}
+
+void gigaset_isdn_stop(struct cardstate *cs)
+{
+}
+
+int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
+{
+	pr_info("no ISDN subsystem interface\n");
+	return 1;
+}
+
+void gigaset_isdn_unregister(struct cardstate *cs)
+{
+}
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index cc768caa38f5..ddeb0456d202 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -40,8 +40,8 @@
 
 /* Possible ASCII responses */
 #define RSP_OK		0
-//#define RSP_BUSY	1
-//#define RSP_CONNECT	2
+#define RSP_BUSY	1
+#define RSP_CONNECT	2
 #define RSP_ZGCI	3
 #define RSP_RING	4
 #define RSP_ZAOC	5
@@ -68,7 +68,6 @@
 #define RSP_ZHLC	(RSP_STR + STR_ZHLC)
 #define RSP_ERROR	-1	/* ERROR              */
 #define RSP_WRONG_CID	-2	/* unknown cid in cmd */
-//#define RSP_EMPTY	-3
 #define RSP_UNKNOWN	-4	/* unknown response   */
 #define RSP_FAIL	-5	/* internal error     */
 #define RSP_INVAL	-6	/* invalid response   */
@@ -76,9 +75,9 @@
 #define RSP_NONE	-19
 #define RSP_STRING	-20
 #define RSP_NULL	-21
-//#define RSP_RETRYFAIL	-22
-//#define RSP_RETRY	-23
-//#define RSP_SKIP	-24
+#define RSP_RETRYFAIL	-22
+#define RSP_RETRY	-23
+#define RSP_SKIP	-24
 #define RSP_INIT	-27
 #define RSP_ANY		-26
 #define RSP_LAST	-28
@@ -127,7 +126,6 @@
 #define ACT_NOTIFY_BC_UP	39
 #define ACT_DIAL		40
 #define ACT_ACCEPT		41
-#define ACT_PROTO_L2		42
 #define ACT_HUP			43
 #define ACT_IF_LOCK		44
 #define ACT_START		45
@@ -159,229 +157,229 @@
 #define SEQ_UMMODE	11
 
 
-// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring
+/* 100: init, 200: dle0, 250: dle1, 300: get cid (dial), 350: "hup" (no cid),
+ * 400: hup, 500: reset, 600: dial, 700: ring */
 struct reply_t gigaset_tab_nocid[] =
 {
-	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
-
-	/* initialize device, set cid mode if possible */
-	//{RSP_INIT,     -1, -1,100,                900, 0, {ACT_TEST}},
-	//{RSP_ERROR,   900,900, -1,                  0, 0, {ACT_FAILINIT}},
-	//{RSP_OK,      900,900, -1,                100, INIT_TIMEOUT,
-	//                                                  {ACT_TIMEOUT}},
-
-	{RSP_INIT,     -1, -1,SEQ_INIT,           100, INIT_TIMEOUT,
-							  {ACT_TIMEOUT}},                /* wait until device is ready */
-
-	{EV_TIMEOUT,  100,100, -1,                101, 3, {0},             "Z\r"},       /* device in transparent mode? try to initialize it. */
-	{RSP_OK,      101,103, -1,                120, 5, {ACT_GETSTRING}, "+GMR\r"},    /* get version */
-
-	{EV_TIMEOUT,  101,101, -1,                102, 5, {0},             "Z\r"},       /* timeout => try once again. */
-	{RSP_ERROR,   101,101, -1,                102, 5, {0},             "Z\r"},       /* error => try once again. */
-
-	{EV_TIMEOUT,  102,102, -1,                108, 5, {ACT_SETDLE1},   "^SDLE=0\r"}, /* timeout => try again in DLE mode. */
-	{RSP_OK,      108,108, -1,                104,-1},
-	{RSP_ZDLE,    104,104,  0,                103, 5, {0},             "Z\r"},
-	{EV_TIMEOUT,  104,104, -1,                  0, 0, {ACT_FAILINIT}},
-	{RSP_ERROR,   108,108, -1,                  0, 0, {ACT_FAILINIT}},
-
-	{EV_TIMEOUT,  108,108, -1,                105, 2, {ACT_SETDLE0,
-							   ACT_HUPMODEM,
-							   ACT_TIMEOUT}},                /* still timeout => connection in unimodem mode? */
-	{EV_TIMEOUT,  105,105, -1,                103, 5, {0},             "Z\r"},
-
-	{RSP_ERROR,   102,102, -1,                107, 5, {0},             "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */
-	{RSP_OK,      107,107, -1,                  0, 0, {ACT_CONFIGMODE}},
-	{RSP_ERROR,   107,107, -1,                  0, 0, {ACT_FAILINIT}},
-	{EV_TIMEOUT,  107,107, -1,                  0, 0, {ACT_FAILINIT}},
-
-	{RSP_ERROR,   103,103, -1,                  0, 0, {ACT_FAILINIT}},
-	{EV_TIMEOUT,  103,103, -1,                  0, 0, {ACT_FAILINIT}},
-
-	{RSP_STRING,  120,120, -1,                121,-1, {ACT_SETVER}},
-
-	{EV_TIMEOUT,  120,121, -1,                  0, 0, {ACT_FAILVER, ACT_INIT}},
-	{RSP_ERROR,   120,121, -1,                  0, 0, {ACT_FAILVER, ACT_INIT}},
-	{RSP_OK,      121,121, -1,                  0, 0, {ACT_GOTVER,  ACT_INIT}},
-
-	/* leave dle mode */
-	{RSP_INIT,      0,  0,SEQ_DLE0,           201, 5, {0},             "^SDLE=0\r"},
-	{RSP_OK,      201,201, -1,                202,-1},
-	{RSP_ZDLE,    202,202,  0,                  0, 0, {ACT_DLE0}},
-	{RSP_NODEV,   200,249, -1,                  0, 0, {ACT_FAKEDLE0}},
-	{RSP_ERROR,   200,249, -1,                  0, 0, {ACT_FAILDLE0}},
-	{EV_TIMEOUT,  200,249, -1,                  0, 0, {ACT_FAILDLE0}},
-
-	/* enter dle mode */
-	{RSP_INIT,      0,  0,SEQ_DLE1,           251, 5, {0},             "^SDLE=1\r"},
-	{RSP_OK,      251,251, -1,                252,-1},
-	{RSP_ZDLE,    252,252,  1,                  0, 0, {ACT_DLE1}},
-	{RSP_ERROR,   250,299, -1,                  0, 0, {ACT_FAILDLE1}},
-	{EV_TIMEOUT,  250,299, -1,                  0, 0, {ACT_FAILDLE1}},
-
-	/* incoming call */
-	{RSP_RING,     -1, -1, -1,                 -1,-1, {ACT_RING}},
-
-	/* get cid */
-	//{RSP_INIT,      0,  0,300,                901, 0, {ACT_TEST}},
-	//{RSP_ERROR,   901,901, -1,                  0, 0, {ACT_FAILCID}},
-	//{RSP_OK,      901,901, -1,                301, 5, {0},             "^SGCI?\r"},
-
-	{RSP_INIT,      0,  0,SEQ_CID,            301, 5, {0},             "^SGCI?\r"},
-	{RSP_OK,      301,301, -1,                302,-1},
-	{RSP_ZGCI,    302,302, -1,                  0, 0, {ACT_CID}},
-	{RSP_ERROR,   301,349, -1,                  0, 0, {ACT_FAILCID}},
-	{EV_TIMEOUT,  301,349, -1,                  0, 0, {ACT_FAILCID}},
-
-	/* enter cid mode */
-	{RSP_INIT,      0,  0,SEQ_CIDMODE,        150, 5, {0},             "^SGCI=1\r"},
-	{RSP_OK,      150,150, -1,                  0, 0, {ACT_CMODESET}},
-	{RSP_ERROR,   150,150, -1,                  0, 0, {ACT_FAILCMODE}},
-	{EV_TIMEOUT,  150,150, -1,                  0, 0, {ACT_FAILCMODE}},
-
-	/* leave cid mode */
-	//{RSP_INIT,      0,  0,SEQ_UMMODE,         160, 5, {0},             "^SGCI=0\r"},
-	{RSP_INIT,      0,  0,SEQ_UMMODE,         160, 5, {0},             "Z\r"},
-	{RSP_OK,      160,160, -1,                  0, 0, {ACT_UMODESET}},
-	{RSP_ERROR,   160,160, -1,                  0, 0, {ACT_FAILUMODE}},
-	{EV_TIMEOUT,  160,160, -1,                  0, 0, {ACT_FAILUMODE}},
-
-	/* abort getting cid */
-	{RSP_INIT,      0,  0,SEQ_NOCID,            0, 0, {ACT_ABORTCID}},
-
-	/* reset */
-	{RSP_INIT,      0,  0,SEQ_SHUTDOWN,       504, 5, {0},             "Z\r"},
-	{RSP_OK,      504,504, -1,                  0, 0, {ACT_SDOWN}},
-	{RSP_ERROR,   501,599, -1,                  0, 0, {ACT_FAILSDOWN}},
-	{EV_TIMEOUT,  501,599, -1,                  0, 0, {ACT_FAILSDOWN}},
-	{RSP_NODEV,   501,599, -1,                  0, 0, {ACT_FAKESDOWN}},
-
-	{EV_PROC_CIDMODE,-1, -1, -1,               -1,-1, {ACT_PROC_CIDMODE}}, //FIXME
-	{EV_IF_LOCK,   -1, -1, -1,                 -1,-1, {ACT_IF_LOCK}}, //FIXME
-	{EV_IF_VER,    -1, -1, -1,                 -1,-1, {ACT_IF_VER}}, //FIXME
-	{EV_START,     -1, -1, -1,                 -1,-1, {ACT_START}}, //FIXME
-	{EV_STOP,      -1, -1, -1,                 -1,-1, {ACT_STOP}}, //FIXME
-	{EV_SHUTDOWN,  -1, -1, -1,                 -1,-1, {ACT_SHUTDOWN}}, //FIXME
-
-	/* misc. */
-	{RSP_ERROR,    -1, -1, -1,                 -1, -1, {ACT_ERROR} },
-	{RSP_EMPTY,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCFGT,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCFG,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZLOG,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZMWI,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZABINFO,  -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZSMLSTCHG,-1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-
-	{RSP_ZCAU,     -1, -1, -1,                 -1,-1, {ACT_ZCAU}},
-	{RSP_NONE,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ANY,      -1, -1, -1,                 -1,-1, {ACT_WARN}},
-	{RSP_LAST}
+/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
+ * action, command */
+
+/* initialize device, set cid mode if possible */
+{RSP_INIT,	 -1,  -1, SEQ_INIT,		100,  1, {ACT_TIMEOUT} },
+
+{EV_TIMEOUT,	100, 100, -1,			101,  3, {0},	"Z\r"},
+{RSP_OK,	101, 103, -1,			120,  5, {ACT_GETSTRING},
+								"+GMR\r"},
+
+{EV_TIMEOUT,	101, 101, -1,			102,  5, {0},	"Z\r"},
+{RSP_ERROR,	101, 101, -1,			102,  5, {0},	"Z\r"},
+
+{EV_TIMEOUT,	102, 102, -1,			108,  5, {ACT_SETDLE1},
+								"^SDLE=0\r"},
+{RSP_OK,	108, 108, -1,			104, -1},
+{RSP_ZDLE,	104, 104,  0,			103,  5, {0},	"Z\r"},
+{EV_TIMEOUT,	104, 104, -1,			  0,  0, {ACT_FAILINIT} },
+{RSP_ERROR,	108, 108, -1,			  0,  0, {ACT_FAILINIT} },
+
+{EV_TIMEOUT,	108, 108, -1,			105,  2, {ACT_SETDLE0,
+							  ACT_HUPMODEM,
+							  ACT_TIMEOUT} },
+{EV_TIMEOUT,	105, 105, -1,			103,  5, {0},	"Z\r"},
+
+{RSP_ERROR,	102, 102, -1,			107,  5, {0},	"^GETPRE\r"},
+{RSP_OK,	107, 107, -1,			  0,  0, {ACT_CONFIGMODE} },
+{RSP_ERROR,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
+{EV_TIMEOUT,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
+
+{RSP_ERROR,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
+{EV_TIMEOUT,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
+
+{RSP_STRING,	120, 120, -1,			121, -1, {ACT_SETVER} },
+
+{EV_TIMEOUT,	120, 121, -1,			  0,  0, {ACT_FAILVER,
+							  ACT_INIT} },
+{RSP_ERROR,	120, 121, -1,			  0,  0, {ACT_FAILVER,
+							  ACT_INIT} },
+{RSP_OK,	121, 121, -1,			  0,  0, {ACT_GOTVER,
+							  ACT_INIT} },
+
+/* leave dle mode */
+{RSP_INIT,	  0,   0, SEQ_DLE0,		201,  5, {0},	"^SDLE=0\r"},
+{RSP_OK,	201, 201, -1,			202, -1},
+{RSP_ZDLE,	202, 202,  0,			  0,  0, {ACT_DLE0} },
+{RSP_NODEV,	200, 249, -1,			  0,  0, {ACT_FAKEDLE0} },
+{RSP_ERROR,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
+{EV_TIMEOUT,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
+
+/* enter dle mode */
+{RSP_INIT,	  0,   0, SEQ_DLE1,		251,  5, {0},	"^SDLE=1\r"},
+{RSP_OK,	251, 251, -1,			252, -1},
+{RSP_ZDLE,	252, 252,  1,			  0,  0, {ACT_DLE1} },
+{RSP_ERROR,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
+{EV_TIMEOUT,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
+
+/* incoming call */
+{RSP_RING,	 -1,  -1, -1,			 -1, -1, {ACT_RING} },
+
+/* get cid */
+{RSP_INIT,	  0,   0, SEQ_CID,		301,  5, {0},	"^SGCI?\r"},
+{RSP_OK,	301, 301, -1,			302, -1},
+{RSP_ZGCI,	302, 302, -1,			  0,  0, {ACT_CID} },
+{RSP_ERROR,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
+{EV_TIMEOUT,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
+
+/* enter cid mode */
+{RSP_INIT,	  0,   0, SEQ_CIDMODE,		150,  5, {0},	"^SGCI=1\r"},
+{RSP_OK,	150, 150, -1,			  0,  0, {ACT_CMODESET} },
+{RSP_ERROR,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
+{EV_TIMEOUT,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
+
+/* leave cid mode */
+{RSP_INIT,	  0,   0, SEQ_UMMODE,		160,  5, {0},	"Z\r"},
+{RSP_OK,	160, 160, -1,			  0,  0, {ACT_UMODESET} },
+{RSP_ERROR,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
+{EV_TIMEOUT,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
+
+/* abort getting cid */
+{RSP_INIT,	  0,   0, SEQ_NOCID,		  0,  0, {ACT_ABORTCID} },
+
+/* reset */
+{RSP_INIT,	  0,   0, SEQ_SHUTDOWN,		504,  5, {0},	"Z\r"},
+{RSP_OK,	504, 504, -1,			  0,  0, {ACT_SDOWN} },
+{RSP_ERROR,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
+{EV_TIMEOUT,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
+{RSP_NODEV,	501, 599, -1,			  0,  0, {ACT_FAKESDOWN} },
+
+{EV_PROC_CIDMODE, -1, -1, -1,			 -1, -1, {ACT_PROC_CIDMODE} },
+{EV_IF_LOCK,	 -1,  -1, -1,			 -1, -1, {ACT_IF_LOCK} },
+{EV_IF_VER,	 -1,  -1, -1,			 -1, -1, {ACT_IF_VER} },
+{EV_START,	 -1,  -1, -1,			 -1, -1, {ACT_START} },
+{EV_STOP,	 -1,  -1, -1,			 -1, -1, {ACT_STOP} },
+{EV_SHUTDOWN,	 -1,  -1, -1,			 -1, -1, {ACT_SHUTDOWN} },
+
+/* misc. */
+{RSP_ERROR,	 -1,  -1, -1,			 -1, -1, {ACT_ERROR} },
+{RSP_ZCFGT,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCFG,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZLOG,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZMWI,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZABINFO,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZSMLSTCHG,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+
+{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
+{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
+{RSP_LAST}
 };
 
-// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall
+/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
+ * 400: hup, 750: accepted icall */
 struct reply_t gigaset_tab_cid[] =
 {
-	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
-
-	/* dial */
-	{EV_DIAL,      -1, -1, -1,                 -1,-1, {ACT_DIAL}}, //FIXME
-	{RSP_INIT,      0,  0,SEQ_DIAL,           601, 5, {ACT_CMD+AT_BC}},
-	{RSP_OK,      601,601, -1,                602, 5, {ACT_CMD+AT_HLC}},
-	{RSP_NULL,    602,602, -1,                603, 5, {ACT_CMD+AT_PROTO}},
-	{RSP_OK,      602,602, -1,                603, 5, {ACT_CMD+AT_PROTO}},
-	{RSP_OK,      603,603, -1,                604, 5, {ACT_CMD+AT_TYPE}},
-	{RSP_OK,      604,604, -1,                605, 5, {ACT_CMD+AT_MSN}},
-	{RSP_OK,      605,605, -1,                606, 5, {ACT_CMD+AT_ISO}},
-	{RSP_NULL,    605,605, -1,                606, 5, {ACT_CMD+AT_ISO}},
-	{RSP_OK,      606,606, -1,                607, 5, {0}, "+VLS=17\r"},
-	{RSP_OK,      607,607, -1,                608,-1},
-	{RSP_ZSAU,    608,608,ZSAU_PROCEEDING,    609, 5, {ACT_CMD+AT_DIAL}},
-	{RSP_OK,      609,609, -1,                650, 0, {ACT_DIALING}},
-
-	{RSP_ERROR,   601,609, -1,                  0, 0, {ACT_ABORTDIAL}},
-	{EV_TIMEOUT,  601,609, -1,                  0, 0, {ACT_ABORTDIAL}},
-
-	/* optional dialing responses */
-	{EV_BC_OPEN,  650,650, -1,                651,-1},
-	{RSP_ZVLS,    608,651, 17,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ZCTP,    609,651, -1,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ZCPN,    609,651, -1,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ZSAU,    650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}},
-
-	/* connect */
-	{RSP_ZSAU,    650,650,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT}},
-	{RSP_ZSAU,    651,651,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT,
-							   ACT_NOTIFY_BC_UP}},
-	{RSP_ZSAU,    750,750,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT}},
-	{RSP_ZSAU,    751,751,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT,
-							   ACT_NOTIFY_BC_UP}},
-	{EV_BC_OPEN,  800,800, -1,                800,-1, {ACT_NOTIFY_BC_UP}},
-
-	/* remote hangup */
-	{RSP_ZSAU,    650,651,ZSAU_DISCONNECT_IND,  0, 0, {ACT_REMOTEREJECT}},
-	{RSP_ZSAU,    750,751,ZSAU_DISCONNECT_IND,  0, 0, {ACT_REMOTEHUP}},
-	{RSP_ZSAU,    800,800,ZSAU_DISCONNECT_IND,  0, 0, {ACT_REMOTEHUP}},
-
-	/* hangup */
-	{EV_HUP,       -1, -1, -1,                 -1,-1, {ACT_HUP}}, //FIXME
-	{RSP_INIT,     -1, -1,SEQ_HUP,            401, 5, {0},             "+VLS=0\r"}, /* hang up */ //-1,-1?
-	{RSP_OK,      401,401, -1,                402, 5},
-	{RSP_ZVLS,    402,402,  0,                403, 5},
-	{RSP_ZSAU,    403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,    403, 403, ZSAU_NULL,            0,  0, {ACT_DISCONNECT} },
-	{RSP_NODEV,   401, 403, -1,                   0,  0, {ACT_FAKEHUP} },
-	{RSP_ERROR,   401,401, -1,                  0, 0, {ACT_ABORTHUP}},
-	{EV_TIMEOUT,  401,403, -1,                  0, 0, {ACT_ABORTHUP}},
-
-	{EV_BC_CLOSED,  0,  0, -1,                  0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout
-
-	/* ring */
-	{RSP_ZBC,     700,700, -1,                 -1,-1, {0}},
-	{RSP_ZHLC,    700,700, -1,                 -1,-1, {0}},
-	{RSP_NMBR,    700,700, -1,                 -1,-1, {0}},
-	{RSP_ZCPN,    700,700, -1,                 -1,-1, {0}},
-	{RSP_ZCTP,    700,700, -1,                 -1,-1, {0}},
-	{EV_TIMEOUT,  700,700, -1,               720,720, {ACT_ICALL}},
-	{EV_BC_CLOSED,720,720, -1,                  0,-1, {ACT_NOTIFY_BC_DOWN}},
-
-	/*accept icall*/
-	{EV_ACCEPT,    -1, -1, -1,                 -1,-1, {ACT_ACCEPT}}, //FIXME
-	{RSP_INIT,    720,720,SEQ_ACCEPT,         721, 5, {ACT_CMD+AT_PROTO}},
-	{RSP_OK,      721,721, -1,                722, 5, {ACT_CMD+AT_ISO}},
-	{RSP_OK,      722,722, -1,                723, 5, {0},             "+VLS=17\r"}, /* set "Endgeraetemodus" */
-	{RSP_OK,      723,723, -1,                724, 5, {0}},
-	{RSP_ZVLS,    724,724, 17,                750,50, {ACT_ACCEPTED}},
-	{RSP_ERROR,   721,729, -1,                  0, 0, {ACT_ABORTACCEPT}},
-	{EV_TIMEOUT,  721,729, -1,                  0, 0, {ACT_ABORTACCEPT}},
-	{RSP_ZSAU,    700,729,ZSAU_NULL,            0, 0, {ACT_ABORTACCEPT}},
-	{RSP_ZSAU,    700,729,ZSAU_ACTIVE,          0, 0, {ACT_ABORTACCEPT}},
-	{RSP_ZSAU,    700,729,ZSAU_DISCONNECT_IND,  0, 0, {ACT_ABORTACCEPT}},
-
-	{EV_BC_OPEN,  750,750, -1,                751,-1},
-	{EV_TIMEOUT,  750,751, -1,                  0, 0, {ACT_CONNTIMEOUT}},
-
-	/* B channel closed (general case) */
-	{EV_BC_CLOSED, -1, -1, -1,                 -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
-
-	/* misc. */
-	{EV_PROTO_L2,  -1, -1, -1,                 -1,-1, {ACT_PROTO_L2}}, //FIXME
-
-	{RSP_ZCON,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCCR,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZAOC,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCSTR,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-
-	{RSP_ZCAU,     -1, -1, -1,                 -1,-1, {ACT_ZCAU}},
-	{RSP_NONE,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ANY,      -1, -1, -1,                 -1,-1, {ACT_WARN}},
-	{RSP_LAST}
+/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
+ * action, command */
+
+/* dial */
+{EV_DIAL,	 -1,  -1, -1,			 -1, -1, {ACT_DIAL} },
+{RSP_INIT,	  0,   0, SEQ_DIAL,		601,  5, {ACT_CMD+AT_BC} },
+{RSP_OK,	601, 601, -1,			602,  5, {ACT_CMD+AT_HLC} },
+{RSP_NULL,	602, 602, -1,			603,  5, {ACT_CMD+AT_PROTO} },
+{RSP_OK,	602, 602, -1,			603,  5, {ACT_CMD+AT_PROTO} },
+{RSP_OK,	603, 603, -1,			604,  5, {ACT_CMD+AT_TYPE} },
+{RSP_OK,	604, 604, -1,			605,  5, {ACT_CMD+AT_MSN} },
+{RSP_NULL,	605, 605, -1,			606,  5, {ACT_CMD+AT_CLIP} },
+{RSP_OK,	605, 605, -1,			606,  5, {ACT_CMD+AT_CLIP} },
+{RSP_NULL,	606, 606, -1,			607,  5, {ACT_CMD+AT_ISO} },
+{RSP_OK,	606, 606, -1,			607,  5, {ACT_CMD+AT_ISO} },
+{RSP_OK,	607, 607, -1,			608,  5, {0},	"+VLS=17\r"},
+{RSP_OK,	608, 608, -1,			609, -1},
+{RSP_ZSAU,	609, 609, ZSAU_PROCEEDING,	610,  5, {ACT_CMD+AT_DIAL} },
+{RSP_OK,	610, 610, -1,			650,  0, {ACT_DIALING} },
+
+{RSP_ERROR,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
+{EV_TIMEOUT,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
+
+/* optional dialing responses */
+{EV_BC_OPEN,	650, 650, -1,			651, -1},
+{RSP_ZVLS,	609, 651, 17,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCTP,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCPN,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZSAU,	650, 651, ZSAU_CALL_DELIVERED,	 -1, -1, {ACT_DEBUG} },
+
+/* connect */
+{RSP_ZSAU,	650, 650, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
+{RSP_ZSAU,	651, 651, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
+							  ACT_NOTIFY_BC_UP} },
+{RSP_ZSAU,	750, 750, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
+{RSP_ZSAU,	751, 751, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
+							  ACT_NOTIFY_BC_UP} },
+{EV_BC_OPEN,	800, 800, -1,			800, -1, {ACT_NOTIFY_BC_UP} },
+
+/* remote hangup */
+{RSP_ZSAU,	650, 651, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEREJECT} },
+{RSP_ZSAU,	750, 751, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
+{RSP_ZSAU,	800, 800, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
+
+/* hangup */
+{EV_HUP,	 -1,  -1, -1,			 -1, -1, {ACT_HUP} },
+{RSP_INIT,	 -1,  -1, SEQ_HUP,		401,  5, {0},	"+VLS=0\r"},
+{RSP_OK,	401, 401, -1,			402,  5},
+{RSP_ZVLS,	402, 402,  0,			403,  5},
+{RSP_ZSAU,	403, 403, ZSAU_DISCONNECT_REQ,	 -1, -1, {ACT_DEBUG} },
+{RSP_ZSAU,	403, 403, ZSAU_NULL,		  0,  0, {ACT_DISCONNECT} },
+{RSP_NODEV,	401, 403, -1,			  0,  0, {ACT_FAKEHUP} },
+{RSP_ERROR,	401, 401, -1,			  0,  0, {ACT_ABORTHUP} },
+{EV_TIMEOUT,	401, 403, -1,			  0,  0, {ACT_ABORTHUP} },
+
+{EV_BC_CLOSED,	  0,   0, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },
+
+/* ring */
+{RSP_ZBC,	700, 700, -1,			 -1, -1, {0} },
+{RSP_ZHLC,	700, 700, -1,			 -1, -1, {0} },
+{RSP_NMBR,	700, 700, -1,			 -1, -1, {0} },
+{RSP_ZCPN,	700, 700, -1,			 -1, -1, {0} },
+{RSP_ZCTP,	700, 700, -1,			 -1, -1, {0} },
+{EV_TIMEOUT,	700, 700, -1,			720, 720, {ACT_ICALL} },
+{EV_BC_CLOSED,	720, 720, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },
+
+/*accept icall*/
+{EV_ACCEPT,	 -1,  -1, -1,			 -1, -1, {ACT_ACCEPT} },
+{RSP_INIT,	720, 720, SEQ_ACCEPT,		721,  5, {ACT_CMD+AT_PROTO} },
+{RSP_OK,	721, 721, -1,			722,  5, {ACT_CMD+AT_ISO} },
+{RSP_OK,	722, 722, -1,			723,  5, {0},	"+VLS=17\r"},
+{RSP_OK,	723, 723, -1,			724,  5, {0} },
+{RSP_ZVLS,	724, 724, 17,			750, 50, {ACT_ACCEPTED} },
+{RSP_ERROR,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
+{EV_TIMEOUT,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
+{RSP_ZSAU,	700, 729, ZSAU_NULL,		  0,  0, {ACT_ABORTACCEPT} },
+{RSP_ZSAU,	700, 729, ZSAU_ACTIVE,		  0,  0, {ACT_ABORTACCEPT} },
+{RSP_ZSAU,	700, 729, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_ABORTACCEPT} },
+
+{EV_BC_OPEN,	750, 750, -1,			751, -1},
+{EV_TIMEOUT,	750, 751, -1,			  0,  0, {ACT_CONNTIMEOUT} },
+
+/* B channel closed (general case) */
+{EV_BC_CLOSED,	 -1,  -1, -1,			 -1, -1, {ACT_NOTIFY_BC_DOWN} },
+
+/* misc. */
+{RSP_ZCON,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCCR,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZAOC,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCSTR,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+
+{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
+{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
+{RSP_LAST}
 };
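
Both tables above drive the AT response state machine: a row applies when its resp_code matches the incoming response or event and the current ConState falls within [min_ConState, max_ConState] (-1 acting as a wildcard); the matching row then supplies the new ConState, a timeout, up to a few ACT_* actions and an optional AT command to send. A self-contained sketch of that matching rule, with a reduced field set and illustrative action names (the real lookup in ev-layer.c also matches on the parameter column):

	#include <stdio.h>

	#define MAXACT 3

	/* simplified row format; the driver's reply_t also carries a command string */
	struct reply {
		int resp_code;			/* response/event code, RSP_LAST ends the table */
		int min_state, max_state;	/* ConState range, -1 = any */
		int new_state;			/* next ConState, -1 = unchanged */
		int timeout;
		int action[MAXACT];
	};

	/* response codes as in ev-layer.c; action names are illustrative only */
	enum { RSP_OK = 0, RSP_ERROR = -1, RSP_LAST = -28 };
	enum { ACT_NONE, ACT_GETVER, ACT_WARN };

	static const struct reply table[] = {
		{ RSP_OK,	101, 103, 120,  5, { ACT_GETVER } },
		{ RSP_ERROR,	101, 101, 102,  5, { ACT_NONE } },
		{ RSP_ERROR,	 -1,  -1,  -1, -1, { ACT_WARN } },	/* catch-all */
		{ RSP_LAST }
	};

	/* first row whose code matches and whose state range covers 'state' wins */
	static const struct reply *lookup(int code, int state)
	{
		const struct reply *r;

		for (r = table; r->resp_code != RSP_LAST; r++) {
			if (r->resp_code != code)
				continue;
			if (r->min_state != -1 && state < r->min_state)
				continue;
			if (r->max_state != -1 && state > r->max_state)
				continue;
			return r;
		}
		return NULL;
	}

	int main(void)
	{
		const struct reply *r = lookup(RSP_OK, 102);

		if (r)
			printf("new state %d, timeout %d\n", r->new_state, r->timeout);
		return 0;
	}

Here lookup(RSP_OK, 102) selects the first row (new state 120), while an RSP_ERROR in any other state falls through to the wildcard catch-all, mirroring how both gigaset tables end in wildcard rows before RSP_LAST.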
 
 
-static const struct resp_type_t resp_type[] =
+static const struct resp_type_t {
+	unsigned char	*response;
+	int		resp_code;
+	int		type;
+} resp_type[] =
 {
-	/*{"",		RSP_EMPTY,	RT_NOTHING},*/
 	{"OK",		RSP_OK,		RT_NOTHING},
 	{"ERROR",	RSP_ERROR,	RT_NOTHING},
 	{"ZSAU",	RSP_ZSAU,	RT_ZSAU},
@@ -405,7 +403,21 @@ static const struct resp_type_t resp_type[] =
 	{"ZLOG",	RSP_ZLOG,	RT_NOTHING},
 	{"ZABINFO",	RSP_ZABINFO,	RT_NOTHING},
 	{"ZSMLSTCHG",	RSP_ZSMLSTCHG,	RT_NOTHING},
-	{NULL,0,0}
+	{NULL,		0,		0}
+};
+
+static const struct zsau_resp_t {
+	unsigned char	*str;
+	int		code;
+} zsau_resp[] =
+{
+	{"OUTGOING_CALL_PROCEEDING",	ZSAU_OUTGOING_CALL_PROCEEDING},
+	{"CALL_DELIVERED",		ZSAU_CALL_DELIVERED},
+	{"ACTIVE",			ZSAU_ACTIVE},
+	{"DISCONNECT_IND",		ZSAU_DISCONNECT_IND},
+	{"NULL",			ZSAU_NULL},
+	{"DISCONNECT_REQ",		ZSAU_DISCONNECT_REQ},
+	{NULL,				ZSAU_UNKNOWN}
 };
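
The zsau_resp[] table just added replaces the strcmp() if/else chain removed further down in gigaset_handle_modem_response(): the parser walks the table until the parameter string matches or the NULL sentinel is reached, and the sentinel's code field doubles as the ZSAU_UNKNOWN fallback. A standalone sketch of that sentinel idiom (userspace C; the ZSAU_* values here are placeholders, the real ones live in gigaset.h):

	#include <stdio.h>
	#include <string.h>

	enum { ZSAU_ACTIVE = 1, ZSAU_DISCONNECT_IND = 2, ZSAU_NULL = 3, ZSAU_UNKNOWN = -1 };

	static const struct zsau_resp_t {
		const char	*str;
		int		code;
	} zsau_resp[] = {
		{ "ACTIVE",		ZSAU_ACTIVE },
		{ "DISCONNECT_IND",	ZSAU_DISCONNECT_IND },
		{ "NULL",		ZSAU_NULL },
		{ NULL,			ZSAU_UNKNOWN }	/* sentinel = fallback code */
	};

	static int zsau_code(const char *arg)
	{
		const struct zsau_resp_t *zr;

		/* stop at the matching entry or at the NULL sentinel */
		for (zr = zsau_resp; zr->str; zr++)
			if (!strcmp(arg, zr->str))
				break;
		if (!zr->str)
			fprintf(stderr, "unknown parameter %s after ZSAU\n", arg);
		return zr->code;
	}

	int main(void)
	{
		printf("%d %d\n", zsau_code("ACTIVE"), zsau_code("GARBAGE"));
		return 0;
	}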
 
 /*
@@ -470,7 +482,6 @@ static int cid_of_response(char *s)
 	if (cid < 1 || cid > 65535)
 		return -1;	/* CID out of range */
 	return cid;
-	//FIXME is ;<digit>+ at end of non-CID response really impossible?
 }
 
 /**
@@ -487,6 +498,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
 	int params;
 	int i, j;
 	const struct resp_type_t *rt;
+	const struct zsau_resp_t *zr;
 	int curarg;
 	unsigned long flags;
 	unsigned next, tail, head;
@@ -613,24 +625,14 @@ void gigaset_handle_modem_response(struct cardstate *cs)
 				event->parameter = ZSAU_NONE;
 				break;
 			}
-			if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING"))
-				event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING;
-			else if (!strcmp(argv[curarg], "CALL_DELIVERED"))
-				event->parameter = ZSAU_CALL_DELIVERED;
-			else if (!strcmp(argv[curarg], "ACTIVE"))
-				event->parameter = ZSAU_ACTIVE;
-			else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
-				event->parameter = ZSAU_DISCONNECT_IND;
-			else if (!strcmp(argv[curarg], "NULL"))
-				event->parameter = ZSAU_NULL;
-			else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
-				event->parameter = ZSAU_DISCONNECT_REQ;
-			else {
-				event->parameter = ZSAU_UNKNOWN;
+			for (zr = zsau_resp; zr->str; ++zr)
+				if (!strcmp(argv[curarg], zr->str))
+					break;
+			event->parameter = zr->code;
+			if (!zr->str)
 				dev_warn(cs->dev,
 					"%s: unknown parameter %s after ZSAU\n",
 					 __func__, argv[curarg]);
-			}
 			++curarg;
 			break;
 		case RT_STRING:
@@ -714,7 +716,7 @@ static void disconnect(struct at_state_t **at_state_p)
 		/* notify LL */
 		if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
 			bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
-			gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
+			gigaset_isdn_hupD(bcs);
 		}
 	} else {
 		/* no B channel assigned: just deallocate */
@@ -872,12 +874,12 @@ static void bchannel_down(struct bc_state *bcs)
 {
 	if (bcs->chstate & CHS_B_UP) {
 		bcs->chstate &= ~CHS_B_UP;
-		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
+		gigaset_isdn_hupB(bcs);
 	}
 
 	if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
 		bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
-		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
+		gigaset_isdn_hupD(bcs);
 	}
 
 	gigaset_free_channel(bcs);
@@ -894,15 +896,17 @@ static void bchannel_up(struct bc_state *bcs)
 	}
 
 	bcs->chstate |= CHS_B_UP;
-	gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
+	gigaset_isdn_connB(bcs);
 }
 
-static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_index)
+static void start_dial(struct at_state_t *at_state, void *data,
+			unsigned seq_index)
 {
 	struct bc_state *bcs = at_state->bcs;
 	struct cardstate *cs = at_state->cs;
-	int retval;
+	char **commands = data;
 	unsigned long flags;
+	int i;
 
 	bcs->chstate |= CHS_NOTIFY_LL;
 
@@ -913,10 +917,10 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
 	}
 	spin_unlock_irqrestore(&cs->lock, flags);
 
-	retval = gigaset_isdn_setup_dial(at_state, data);
-	if (retval != 0)
-		goto error;
-
+	for (i = 0; i < AT_NUM; ++i) {
+		kfree(bcs->commands[i]);
+		bcs->commands[i] = commands[i];
+	}
 
 	at_state->pending_commands |= PC_CID;
 	gig_dbg(DEBUG_CMD, "Scheduling PC_CID");
@@ -924,6 +928,10 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
 	return;
 
 error:
+	for (i = 0; i < AT_NUM; ++i) {
+		kfree(commands[i]);
+		commands[i] = NULL;
+	}
 	at_state->pending_commands |= PC_NOCID;
 	gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID");
 	cs->commands_pending = 1;
@@ -933,20 +941,31 @@ error:
 static void start_accept(struct at_state_t *at_state)
 {
 	struct cardstate *cs = at_state->cs;
-	int retval;
+	struct bc_state *bcs = at_state->bcs;
+	int i;
 
-	retval = gigaset_isdn_setup_accept(at_state);
+	for (i = 0; i < AT_NUM; ++i) {
+		kfree(bcs->commands[i]);
+		bcs->commands[i] = NULL;
+	}
 
-	if (retval == 0) {
-		at_state->pending_commands |= PC_ACCEPT;
-		gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
-		cs->commands_pending = 1;
-	} else {
+	bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
+	bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
+	if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
+		dev_err(at_state->cs->dev, "out of memory\n");
 		/* error reset */
 		at_state->pending_commands |= PC_HUP;
 		gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
 		cs->commands_pending = 1;
+		return;
 	}
+
+	snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
+	snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
+
+	at_state->pending_commands |= PC_ACCEPT;
+	gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
+	cs->commands_pending = 1;
 }
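
The two 9-byte allocations in start_accept() above are sized exactly for the strings they receive: "^SBPR=" and "^SISO=" are six characters each, the argument is a single decimal digit (proto2 is 0..2, and channel + 1 is 1 or 2 on these two-channel devices), and "\r" plus the terminating NUL brings the total to nine. A quick standalone check of that arithmetic (the proto2 and channel values below are illustrative):

	#include <stdio.h>

	int main(void)
	{
		char proto_cmd[9], iso_cmd[9];
		unsigned proto2 = 1;	/* HDLC */
		unsigned channel = 0;	/* first B channel */
		int n1, n2;

		n1 = snprintf(proto_cmd, sizeof(proto_cmd), "^SBPR=%u\r", proto2);
		n2 = snprintf(iso_cmd, sizeof(iso_cmd), "^SISO=%u\r", channel + 1);

		/* both fit: 6 + 1 + 1 = 8 characters plus the NUL terminator */
		printf("%d %d\n", n1, n2);	/* prints "8 8" */
		return 0;
	}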
 
 static void do_start(struct cardstate *cs)
@@ -957,9 +976,7 @@ static void do_start(struct cardstate *cs)
 		schedule_init(cs, MS_INIT);
 
 	cs->isdn_up = 1;
-	gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
-					// FIXME: not in locked mode
-					// FIXME 2: only after init sequence
+	gigaset_isdn_start(cs);
 
 	cs->waiting = 0;
 	wake_up(&cs->waitqueue);
@@ -975,7 +992,7 @@ static void finish_shutdown(struct cardstate *cs)
 	/* Tell the LL that the device is not available .. */
 	if (cs->isdn_up) {
 		cs->isdn_up = 0;
-		gigaset_i4l_cmd(cs, ISDN_STAT_STOP);
+		gigaset_isdn_stop(cs);
 	}
 
 	/* The rest is done by cleanup_cs () in user mode. */
@@ -1113,7 +1130,6 @@ static int do_lock(struct cardstate *cs)
 
 		break;
 	case MS_LOCKED:
-		//retval = -EACCES;
 		break;
 	default:
 		return -EBUSY;
@@ -1276,7 +1292,7 @@ static void do_action(int action, struct cardstate *cs,
 			break;
 		}
 		bcs->chstate |= CHS_D_UP;
-		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
+		gigaset_isdn_connD(bcs);
 		cs->ops->init_bchannel(bcs);
 		break;
 	case ACT_DLE1:
@@ -1284,7 +1300,7 @@ static void do_action(int action, struct cardstate *cs,
 		bcs = cs->bcs + cs->curchannel;
 
 		bcs->chstate |= CHS_D_UP;
-		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
+		gigaset_isdn_connD(bcs);
 		cs->ops->init_bchannel(bcs);
 		break;
 	case ACT_FAKEHUP:
@@ -1369,7 +1385,7 @@ static void do_action(int action, struct cardstate *cs,
 		cs->cur_at_seq = SEQ_NONE;
 		break;
 
-	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL processing */
+	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL procssng */
 		disconnect(p_at_state);
 		break;
 
@@ -1443,17 +1459,6 @@ static void do_action(int action, struct cardstate *cs,
 			__func__, at_state->ConState);
 		cs->cur_at_seq = SEQ_NONE;
 		break;
-#ifdef CONFIG_GIGASET_DEBUG
-	case ACT_TEST:
-		{
-			static int count = 3; //2; //1;
-			*p_genresp = 1;
-			*p_resp_code = count ? RSP_ERROR : RSP_OK;
-			if (count > 0)
-				--count;
-		}
-		break;
-#endif
 	case ACT_DEBUG:
 		gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
 			__func__, ev->type, at_state->ConState);
@@ -1474,11 +1479,6 @@ static void do_action(int action, struct cardstate *cs,
 	case ACT_ACCEPT:
 		start_accept(at_state);
 		break;
-	case ACT_PROTO_L2:
-		gig_dbg(DEBUG_CMD, "set protocol to %u",
-			(unsigned) ev->parameter);
-		at_state->bcs->proto2 = ev->parameter;
-		break;
 	case ACT_HUP:
 		at_state->pending_commands |= PC_HUP;
 		cs->commands_pending = 1;
@@ -1493,7 +1493,7 @@ static void do_action(int action, struct cardstate *cs,
 		do_start(cs);
 		break;
 
-	/* events from the interface */ // FIXME without ACT_xxxx?
+	/* events from the interface */
 	case ACT_IF_LOCK:
 		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
 		cs->waiting = 0;
@@ -1512,7 +1512,7 @@ static void do_action(int action, struct cardstate *cs,
 		wake_up(&cs->waitqueue);
 		break;
 
-	/* events from the proc file system */ // FIXME without ACT_xxxx?
+	/* events from the proc file system */
 	case ACT_PROC_CIDMODE:
 		spin_lock_irqsave(&cs->lock, flags);
 		if (ev->parameter != cs->cidmode) {
@@ -1649,7 +1649,8 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
 	for (curact = 0; curact < MAXACT; ++curact) {
 		/* The row tells us what we should do  ..
 		 */
-		do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev);
+		do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
+			  &genresp, &resp_code, ev);
 		if (!at_state)
 			break; /* may be freed after disconnect */
 	}
@@ -1661,13 +1662,14 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
 
 		if (genresp) {
 			spin_lock_irqsave(&cs->lock, flags);
-			at_state->timer_expires = 0; //FIXME
-			at_state->timer_active = 0; //FIXME
+			at_state->timer_expires = 0;
+			at_state->timer_active = 0;
 			spin_unlock_irqrestore(&cs->lock, flags);
-			gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
+			gigaset_add_event(cs, at_state, resp_code,
+					  NULL, 0, NULL);
 		} else {
 			/* Send command to modem if not NULL... */
-			if (p_command/*rep->command*/) {
+			if (p_command) {
 				if (cs->connected)
 					send_command(cs, p_command,
 						     sendcid, cs->dle,
@@ -1754,7 +1756,8 @@ static void process_command_flags(struct cardstate *cs)
 		}
 	}
 
-	/* only switch back to unimodem mode, if no commands are pending and no channels are up */
+	/* only switch back to unimodem mode if no commands are pending and
+	 * no channels are up */
 	spin_lock_irqsave(&cs->lock, flags);
 	if (cs->at_state.pending_commands == PC_UMMODE
 	    && !cs->cidmode
@@ -1813,9 +1816,8 @@ static void process_command_flags(struct cardstate *cs)
 
 	if (cs->at_state.pending_commands & PC_INIT) {
 		cs->at_state.pending_commands &= ~PC_INIT;
-		cs->dle = 0; //FIXME
+		cs->dle = 0;
 		cs->inbuf->inputstate = INS_command;
-		//FIXME reset card state (or -> LOCK0)?
 		schedule_sequence(cs, &cs->at_state, SEQ_INIT);
 		return;
 	}
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index a2f6125739eb..e963a6c2e86d 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -23,7 +23,6 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
-#include <linux/isdnif.h>
 #include <linux/usb.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
@@ -35,12 +34,11 @@
 #include <linux/list.h>
 #include <asm/atomic.h>
 
-#define GIG_VERSION {0,5,0,0}
-#define GIG_COMPAT  {0,4,0,0}
+#define GIG_VERSION {0, 5, 0, 0}
+#define GIG_COMPAT  {0, 4, 0, 0}
 
 #define MAX_REC_PARAMS 10	/* Max. number of params in response string */
 #define MAX_RESP_SIZE 512	/* Max. size of a response string */
-#define HW_HDR_LEN 2		/* Header size used to store ack info */
 
 #define MAX_EVENTS 64		/* size of event queue */
 
@@ -135,35 +133,32 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define OUT_VENDOR_REQ	(USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
 #define IN_VENDOR_REQ	(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
 
-/* int-in-events 3070 */
+/* interrupt pipe messages */
 #define HD_B1_FLOW_CONTROL		0x80
 #define HD_B2_FLOW_CONTROL		0x81
-#define HD_RECEIVEATDATA_ACK		(0x35)		// 3070
-						// att: HD_RECEIVE>>AT<<DATA_ACK
-#define HD_READY_SEND_ATDATA		(0x36)		// 3070
-#define HD_OPEN_ATCHANNEL_ACK		(0x37)		// 3070
-#define HD_CLOSE_ATCHANNEL_ACK		(0x38)		// 3070
-#define HD_DEVICE_INIT_OK		(0x11)		// ISurf USB + 3070
-#define HD_OPEN_B1CHANNEL_ACK		(0x51)		// ISurf USB + 3070
-#define HD_OPEN_B2CHANNEL_ACK		(0x52)		// ISurf USB + 3070
-#define HD_CLOSE_B1CHANNEL_ACK		(0x53)		// ISurf USB + 3070
-#define HD_CLOSE_B2CHANNEL_ACK		(0x54)		// ISurf USB + 3070
-// 	 Powermangment
-#define HD_SUSPEND_END			(0x61)		// ISurf USB
-//   Configuration
-#define HD_RESET_INTERRUPT_PIPE_ACK	(0xFF)		// ISurf USB + 3070
-
-/* control requests 3070 */
-#define	HD_OPEN_B1CHANNEL		(0x23)		// ISurf USB + 3070
-#define	HD_CLOSE_B1CHANNEL		(0x24)		// ISurf USB + 3070
-#define	HD_OPEN_B2CHANNEL		(0x25)		// ISurf USB + 3070
-#define	HD_CLOSE_B2CHANNEL		(0x26)		// ISurf USB + 3070
-#define HD_RESET_INTERRUPT_PIPE		(0x27)		// ISurf USB + 3070
-#define	HD_DEVICE_INIT_ACK		(0x34)		// ISurf USB + 3070
-#define	HD_WRITE_ATMESSAGE		(0x12)		// 3070
-#define	HD_READ_ATMESSAGE		(0x13)		// 3070
-#define	HD_OPEN_ATCHANNEL		(0x28)		// 3070
-#define	HD_CLOSE_ATCHANNEL		(0x29)		// 3070
+#define HD_RECEIVEATDATA_ACK		(0x35)		/* 3070 */
+#define HD_READY_SEND_ATDATA		(0x36)		/* 3070 */
+#define HD_OPEN_ATCHANNEL_ACK		(0x37)		/* 3070 */
+#define HD_CLOSE_ATCHANNEL_ACK		(0x38)		/* 3070 */
+#define HD_DEVICE_INIT_OK		(0x11)		/* ISurf USB + 3070 */
+#define HD_OPEN_B1CHANNEL_ACK		(0x51)		/* ISurf USB + 3070 */
+#define HD_OPEN_B2CHANNEL_ACK		(0x52)		/* ISurf USB + 3070 */
+#define HD_CLOSE_B1CHANNEL_ACK		(0x53)		/* ISurf USB + 3070 */
+#define HD_CLOSE_B2CHANNEL_ACK		(0x54)		/* ISurf USB + 3070 */
+#define HD_SUSPEND_END			(0x61)		/* ISurf USB */
+#define HD_RESET_INTERRUPT_PIPE_ACK	(0xFF)		/* ISurf USB + 3070 */
+
+/* control requests */
+#define	HD_OPEN_B1CHANNEL		(0x23)		/* ISurf USB + 3070 */
+#define	HD_CLOSE_B1CHANNEL		(0x24)		/* ISurf USB + 3070 */
+#define	HD_OPEN_B2CHANNEL		(0x25)		/* ISurf USB + 3070 */
+#define	HD_CLOSE_B2CHANNEL		(0x26)		/* ISurf USB + 3070 */
+#define HD_RESET_INTERRUPT_PIPE		(0x27)		/* ISurf USB + 3070 */
+#define	HD_DEVICE_INIT_ACK		(0x34)		/* ISurf USB + 3070 */
+#define	HD_WRITE_ATMESSAGE		(0x12)		/* 3070 */
+#define	HD_READ_ATMESSAGE		(0x13)		/* 3070 */
+#define	HD_OPEN_ATCHANNEL		(0x28)		/* 3070 */
+#define	HD_CLOSE_ATCHANNEL		(0x29)		/* 3070 */
 
 /* number of B channels supported by base driver */
 #define BAS_CHANNELS	2
@@ -193,7 +188,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define AT_PROTO	4
 #define AT_TYPE		5
 #define AT_HLC		6
-#define AT_NUM		7
+#define AT_CLIP		7
+/* total number */
+#define AT_NUM		8
 
 /* variables in struct at_state_t */
 #define VAR_ZSAU	0
@@ -216,7 +213,6 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define EV_START	-110
 #define EV_STOP		-111
 #define EV_IF_LOCK	-112
-#define EV_PROTO_L2	-113
 #define EV_ACCEPT	-114
 #define EV_DIAL		-115
 #define EV_HUP		-116
@@ -224,12 +220,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define EV_BC_CLOSED	-118
 
 /* input state */
-#define INS_command	0x0001
-#define INS_DLE_char	0x0002
+#define INS_command	0x0001	/* receiving messages (not payload data) */
+#define INS_DLE_char	0x0002	/* DLE flag received (in DLE mode) */
 #define INS_byte_stuff	0x0004
 #define INS_have_data	0x0008
-#define INS_skip_frame	0x0010
-#define INS_DLE_command	0x0020
+#define INS_DLE_command	0x0020	/* DLE message start (<DLE> X) received */
 #define INS_flag_hunt	0x0040
 
 /* channel state */
@@ -259,6 +254,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define SM_LOCKED	0
 #define SM_ISDN		1 /* default */
 
+/* layer 2 protocols (AT^SBPR=...) */
+#define L2_BITSYNC	0
+#define L2_HDLC		1
+#define L2_VOICE	2
+
 struct gigaset_ops;
 struct gigaset_driver;
 
@@ -286,8 +286,6 @@ extern struct reply_t gigaset_tab_cid[];
 extern struct reply_t gigaset_tab_nocid[];
 
 struct inbuf_t {
-	unsigned char		*rcvbuf;	/* usb-gigaset receive buffer */
-	struct bc_state		*bcs;
 	struct cardstate	*cs;
 	int			inputstate;
 	int			head, tail;
@@ -359,12 +357,6 @@ struct at_state_t {
 	struct bc_state		*bcs;
 };
 
-struct resp_type_t {
-	unsigned char	*response;
-	int		resp_code;	/* RSP_XXXX */
-	int		type;		/* RT_XXXX */
-};
-
 struct event_t {
 	int type;
 	void *ptr, *arg;
@@ -395,7 +387,7 @@ struct bc_state {
 
 	unsigned chstate;		/* bitmap (CHS_*) */
 	int ignore;
-	unsigned proto2;		/* Layer 2 protocol (ISDN_PROTO_L2_*) */
+	unsigned proto2;		/* layer 2 protocol (L2_*) */
 	char *commands[AT_NUM];		/* see AT_XXXX */
 
 #ifdef CONFIG_GIGASET_DEBUG
@@ -410,6 +402,8 @@ struct bc_state {
 		struct usb_bc_state *usb;	/* usb hardware driver (m105) */
 		struct bas_bc_state *bas;	/* usb hardware driver (base) */
 	} hw;
+
+	void *ap;			/* LL application structure */
 };
 
 struct cardstate {
@@ -456,12 +450,13 @@ struct cardstate {
 
 	unsigned running;		/* !=0 if events are handled */
 	unsigned connected;		/* !=0 if hardware is connected */
-	unsigned isdn_up;		/* !=0 after ISDN_STAT_RUN */
+	unsigned isdn_up;		/* !=0 after gigaset_isdn_start() */
 
 	unsigned cidmode;
 
 	int myid;			/* id for communication with LL */
-	isdn_if iif;
+	void *iif;			/* LL interface structure */
+	unsigned short hw_hdr_len;	/* headroom needed in data skbs */
 
 	struct reply_t *tabnocid;
 	struct reply_t *tabcid;
@@ -476,8 +471,8 @@ struct cardstate {
 
 	struct timer_list timer;
 	int retry_count;
-	int dle;			/* !=0 if modem commands/responses are
-					   dle encoded */
+	int dle;			/* !=0 if DLE mode is active
+					   (ZDLE=1 received -- M10x only) */
 	int cur_at_seq;			/* sequence of AT commands being
 					   processed */
 	int curchannel;			/* channel those commands are meant
@@ -616,7 +611,9 @@ struct gigaset_ops {
 	int (*baud_rate)(struct cardstate *cs, unsigned cflag);
 	int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
 
-	/* Called from i4l.c to put an skb into the send-queue. */
+	/* Called from LL interface to put an skb into the send-queue.
+	 * After sending is completed, gigaset_skb_sent() must be called
+	 * with the skb's link layer header preserved. */
 	int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
 
 	/* Called from ev-layer.c to process a block of data
@@ -625,7 +622,8 @@ struct gigaset_ops {
 
 };
 
-/* = Common structures and definitions ======================================= */
+/* = Common structures and definitions =======================================
+ */
 
 /* Parser states for DLE-Event:
  * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
@@ -638,8 +636,7 @@ struct gigaset_ops {
  *  Functions implemented in asyncdata.c
  */
 
-/* Called from i4l.c to put an skb into the send-queue.
- * After sending gigaset_skb_sent() should be called. */
+/* Called from LL interface to put an skb into the send queue. */
 int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
 
 /* Called from ev-layer.c to process a block of data
@@ -650,8 +647,7 @@ void gigaset_m10x_input(struct inbuf_t *inbuf);
  *  Functions implemented in isocdata.c
  */
 
-/* Called from i4l.c to put an skb into the send-queue.
- * After sending gigaset_skb_sent() should be called. */
+/* Called from LL interface to put an skb into the send queue. */
 int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
 
 /* Called from ev-layer.c to process a block of data
@@ -674,36 +670,26 @@ void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
 int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
 
 /* ===========================================================================
- *  Functions implemented in i4l.c/gigaset.h
+ *  Functions implemented in LL interface
  */
 
-/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */
-int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid);
+/* Called from common.c for setting up/shutting down with the ISDN subsystem */
+int gigaset_isdn_register(struct cardstate *cs, const char *isdnid);
+void gigaset_isdn_unregister(struct cardstate *cs);
 
-/* Called from xxx-gigaset.c to indicate completion of sending an skb */
+/* Called from hardware module to indicate completion of an skb */
 void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
+void gigaset_isdn_rcv_err(struct bc_state *bcs);
 
 /* Called from common.c/ev-layer.c to indicate events relevant to the LL */
+void gigaset_isdn_start(struct cardstate *cs);
+void gigaset_isdn_stop(struct cardstate *cs);
 int gigaset_isdn_icall(struct at_state_t *at_state);
-int gigaset_isdn_setup_accept(struct at_state_t *at_state);
-int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data);
-
-void gigaset_i4l_cmd(struct cardstate *cs, int cmd);
-void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd);
-
-
-static inline void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
-	isdn_ctrl response;
-
-	/* error -> LL */
-	gig_dbg(DEBUG_CMD, "sending L1ERR");
-	response.driver = bcs->cs->myid;
-	response.command = ISDN_STAT_L1ERR;
-	response.arg = bcs->channel;
-	response.parm.errcode = ISDN_STAT_L1ERR_RECV;
-	bcs->cs->iif.statcallb(&response);
-}
+void gigaset_isdn_connD(struct bc_state *bcs);
+void gigaset_isdn_hupD(struct bc_state *bcs);
+void gigaset_isdn_connB(struct bc_state *bcs);
+void gigaset_isdn_hupB(struct bc_state *bcs);
 
 /* ===========================================================================
  *  Functions implemented in ev-layer.c
@@ -732,6 +718,7 @@ void gigaset_bcs_reinit(struct bc_state *bcs);
 void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
 		     struct cardstate *cs, int cid);
 int gigaset_get_channel(struct bc_state *bcs);
+struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
 void gigaset_free_channel(struct bc_state *bcs);
 int gigaset_get_channels(struct cardstate *cs);
 void gigaset_free_channels(struct cardstate *cs);
@@ -781,7 +768,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
 				  void *ptr, int parameter, void *arg);
 
 /* Called on CONFIG1 command from frontend. */
-int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode
+int gigaset_enterconfigmode(struct cardstate *cs);
 
 /* cs->lock must not be locked */
 static inline void gigaset_schedule_event(struct cardstate *cs)
@@ -816,35 +803,6 @@ static inline void gigaset_bchannel_up(struct bc_state *bcs)
 /* handling routines for sk_buff */
 /* ============================= */
 
-/* pass received skb to LL
- * Warning: skb must not be accessed anymore!
- */
-static inline void gigaset_rcv_skb(struct sk_buff *skb,
-				   struct cardstate *cs,
-				   struct bc_state *bcs)
-{
-	cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb);
-	bcs->trans_down++;
-}
-
-/* handle reception of corrupted skb
- * Warning: skb must not be accessed anymore!
- */
-static inline void gigaset_rcv_error(struct sk_buff *procskb,
-				     struct cardstate *cs,
-				     struct bc_state *bcs)
-{
-	if (procskb)
-		dev_kfree_skb(procskb);
-
-	if (bcs->ignore)
-		--bcs->ignore;
-	else {
-		++bcs->corrupted;
-		gigaset_isdn_rcv_err(bcs);
-	}
-}
-
 /* append received bytes to inbuf */
 int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
 		       unsigned numbytes);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 654489d836cd..c129ee47a8fb 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -14,6 +14,9 @@
  */
 
 #include "gigaset.h"
+#include <linux/isdnif.h>
+
+#define HW_HDR_LEN	2	/* Header size used to store ack info */
 
 /* == Handling of I4L IO =====================================================*/
 
@@ -36,12 +39,12 @@
 static int writebuf_from_LL(int driverID, int channel, int ack,
 			    struct sk_buff *skb)
 {
-	struct cardstate *cs;
+	struct cardstate *cs = gigaset_get_cs_by_id(driverID);
 	struct bc_state *bcs;
+	unsigned char *ack_header;
 	unsigned len;
-	unsigned skblen;
 
-	if (!(cs = gigaset_get_cs_by_id(driverID))) {
+	if (!cs) {
 		pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
 		return -ENODEV;
 	}
@@ -75,11 +78,23 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
 		return -EINVAL;
 	}
 
-	skblen = ack ? len : 0;
-	skb->head[0] = skblen & 0xff;
-	skb->head[1] = skblen >> 8;
-	gig_dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x",
-		len, skblen, (unsigned) skb->head[0], (unsigned) skb->head[1]);
+	/* set up acknowledgement header */
+	if (skb_headroom(skb) < HW_HDR_LEN) {
+		/* should never happen */
+		dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
+		return -ENOMEM;
+	}
+	skb_set_mac_header(skb, -HW_HDR_LEN);
+	skb->mac_len = HW_HDR_LEN;
+	ack_header = skb_mac_header(skb);
+	if (ack) {
+		ack_header[0] = len & 0xff;
+		ack_header[1] = len >> 8;
+	} else {
+		ack_header[0] = ack_header[1] = 0;
+	}
+	gig_dbg(DEBUG_MCMD, "skb: len=%u, ack=%d: %02x %02x",
+		len, ack, ack_header[0], ack_header[1]);
 
 	/* pass to device-specific module */
 	return cs->ops->send_skb(bcs, skb);
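
Rather than writing into skb->head[] directly, the rewritten writebuf_from_LL() above reserves HW_HDR_LEN bytes of existing headroom via the skb's MAC header and stores there the length to be acknowledged, low byte first; gigaset_skb_sent() below reads the same two bytes back before issuing ISDN_STAT_BSENT. The byte layout, stripped of the skb plumbing (userspace sketch, length assumed to fit in 16 bits):

	#include <stdio.h>

	#define HW_HDR_LEN 2	/* header size used to store ack info */

	static void pack_ack(unsigned char hdr[HW_HDR_LEN], unsigned len, int ack)
	{
		/* length to acknowledge, or 0 if no ack is wanted */
		unsigned val = ack ? len : 0;

		hdr[0] = val & 0xff;	/* low byte */
		hdr[1] = val >> 8;	/* high byte */
	}

	static unsigned unpack_ack(const unsigned char hdr[HW_HDR_LEN])
	{
		return hdr[0] + ((unsigned)hdr[1] << 8);
	}

	int main(void)
	{
		unsigned char hdr[HW_HDR_LEN];

		pack_ack(hdr, 1500, 1);
		printf("%u\n", unpack_ack(hdr));	/* 1500 */
		pack_ack(hdr, 1500, 0);
		printf("%u\n", unpack_ack(hdr));	/* 0 */
		return 0;
	}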
@@ -95,6 +110,8 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
  */
 void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
 {
+	isdn_if *iif = bcs->cs->iif;
+	unsigned char *ack_header = skb_mac_header(skb);
 	unsigned len;
 	isdn_ctrl response;
 
@@ -104,8 +121,7 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
 		dev_warn(bcs->cs->dev, "%s: skb->len==%d\n",
 			 __func__, skb->len);
 
-	len = (unsigned char) skb->head[0] |
-	      (unsigned) (unsigned char) skb->head[1] << 8;
+	len = ack_header[0] + ((unsigned) ack_header[1] << 8);
 	if (len) {
 		gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)",
 			bcs->cs->myid, bcs->channel, len);
@@ -114,71 +130,177 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
 		response.command = ISDN_STAT_BSENT;
 		response.arg = bcs->channel;
 		response.parm.length = len;
-		bcs->cs->iif.statcallb(&response);
+		iif->statcallb(&response);
 	}
 }
 EXPORT_SYMBOL_GPL(gigaset_skb_sent);
 
+/**
+ * gigaset_skb_rcvd() - pass received skb to LL
+ * @bcs:	B channel descriptor structure.
+ * @skb:	received data.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when user data has
+ * been successfully received, for passing to the LL.
+ * Warning: skb must not be accessed anymore!
+ */
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
+{
+	isdn_if *iif = bcs->cs->iif;
+
+	iif->rcvcallb_skb(bcs->cs->myid, bcs->channel, skb);
+	bcs->trans_down++;
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
+
+/**
+ * gigaset_isdn_rcv_err() - signal receive error
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when a receive error
+ * has occurred, for signalling to the LL.
+ */
+void gigaset_isdn_rcv_err(struct bc_state *bcs)
+{
+	isdn_if *iif = bcs->cs->iif;
+	isdn_ctrl response;
+
+	/* if currently ignoring packets, just count down */
+	if (bcs->ignore) {
+		bcs->ignore--;
+		return;
+	}
+
+	/* update statistics */
+	bcs->corrupted++;
+
+	/* error -> LL */
+	gig_dbg(DEBUG_CMD, "sending L1ERR");
+	response.driver = bcs->cs->myid;
+	response.command = ISDN_STAT_L1ERR;
+	response.arg = bcs->channel;
+	response.parm.errcode = ISDN_STAT_L1ERR_RECV;
+	iif->statcallb(&response);
+}
+EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
+
 /* This function will be called by LL to send commands
  * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
  * so don't put too much effort into it.
  */
 static int command_from_LL(isdn_ctrl *cntrl)
 {
-	struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver);
+	struct cardstate *cs;
 	struct bc_state *bcs;
 	int retval = 0;
-	struct setup_parm *sp;
+	char **commands;
+	int ch;
+	int i;
+	size_t l;
 
 	gigaset_debugdrivers();
 
-	if (!cs) {
+	gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx",
+		cntrl->driver, cntrl->command, cntrl->arg);
+
+	cs = gigaset_get_cs_by_id(cntrl->driver);
+	if (cs == NULL) {
 		pr_err("%s: invalid driver ID (%d)\n", __func__, cntrl->driver);
 		return -ENODEV;
 	}
+	ch = cntrl->arg & 0xff;
 
 	switch (cntrl->command) {
 	case ISDN_CMD_IOCTL:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)",
-			cntrl->driver, cntrl->arg);
-
 		dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n");
 		return -EINVAL;
 
 	case ISDN_CMD_DIAL:
 		gig_dbg(DEBUG_ANY,
-			"ISDN_CMD_DIAL (driver: %d, ch: %ld, "
-			"phone: %s, ownmsn: %s, si1: %d, si2: %d)",
-			cntrl->driver, cntrl->arg,
+			"ISDN_CMD_DIAL (phone: %s, msn: %s, si1: %d, si2: %d)",
 			cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
 			cntrl->parm.setup.si1, cntrl->parm.setup.si2);
 
-		if (cntrl->arg >= cs->channels) {
+		if (ch >= cs->channels) {
 			dev_err(cs->dev,
-				"ISDN_CMD_DIAL: invalid channel (%d)\n",
-				(int) cntrl->arg);
+				"ISDN_CMD_DIAL: invalid channel (%d)\n", ch);
 			return -EINVAL;
 		}
-
-		bcs = cs->bcs + cntrl->arg;
-
+		bcs = cs->bcs + ch;
 		if (!gigaset_get_channel(bcs)) {
 			dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
 			return -EBUSY;
 		}
 
-		sp = kmalloc(sizeof *sp, GFP_ATOMIC);
-		if (!sp) {
+		commands = kzalloc(AT_NUM*(sizeof *commands), GFP_ATOMIC);
+		if (!commands) {
 			gigaset_free_channel(bcs);
 			dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
 			return -ENOMEM;
 		}
-		*sp = cntrl->parm.setup;
 
-		if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp,
+		l = 3 + strlen(cntrl->parm.setup.phone);
+		commands[AT_DIAL] = kmalloc(l, GFP_ATOMIC);
+		if (!commands[AT_DIAL])
+			goto oom;
+		if (cntrl->parm.setup.phone[0] == '*' &&
+		    cntrl->parm.setup.phone[1] == '*') {
+			/* internal call: translate ** prefix to CTP value */
+			commands[AT_TYPE] = kstrdup("^SCTP=0\r", GFP_ATOMIC);
+			if (!commands[AT_TYPE])
+				goto oom;
+			snprintf(commands[AT_DIAL], l,
+				 "D%s\r", cntrl->parm.setup.phone+2);
+		} else {
+			commands[AT_TYPE] = kstrdup("^SCTP=1\r", GFP_ATOMIC);
+			if (!commands[AT_TYPE])
+				goto oom;
+			snprintf(commands[AT_DIAL], l,
+				 "D%s\r", cntrl->parm.setup.phone);
+		}
+
+		l = strlen(cntrl->parm.setup.eazmsn);
+		if (l) {
+			l += 8;
+			commands[AT_MSN] = kmalloc(l, GFP_ATOMIC);
+			if (!commands[AT_MSN])
+				goto oom;
+			snprintf(commands[AT_MSN], l, "^SMSN=%s\r",
+				 cntrl->parm.setup.eazmsn);
+		}
+
+		switch (cntrl->parm.setup.si1) {
+		case 1:		/* audio */
+			/* BC = 9090A3: 3.1 kHz audio, A-law */
+			commands[AT_BC] = kstrdup("^SBC=9090A3\r", GFP_ATOMIC);
+			if (!commands[AT_BC])
+				goto oom;
+			break;
+		case 7:		/* data */
+		default:	/* hope the app knows what it is doing */
+			/* BC = 8890: unrestricted digital information */
+			commands[AT_BC] = kstrdup("^SBC=8890\r", GFP_ATOMIC);
+			if (!commands[AT_BC])
+				goto oom;
+		}
+		/* ToDo: other si1 values, inspect si2, set HLC/LLC */
+
+		commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
+		if (!commands[AT_PROTO])
+			goto oom;
+		snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
+
+		commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
+		if (!commands[AT_ISO])
+			goto oom;
+		snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
+			 (unsigned) bcs->channel + 1);
+
+		if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
 				       bcs->at_state.seq_index, NULL)) {
-			//FIXME what should we do?
-			kfree(sp);
+			for (i = 0; i < AT_NUM; ++i)
+				kfree(commands[i]);
+			kfree(commands);
 			gigaset_free_channel(bcs);
 			return -ENOMEM;
 		}
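
With setup_parm gone, the ISDN_CMD_DIAL branch above now assembles the per-call AT command strings itself before queueing EV_DIAL: "D<number>\r" (a leading "**" is stripped and mapped to the internal-call type ^SCTP=0), ^SMSN= for the own MSN, ^SBC= chosen from si1, and ^SBPR=/^SISO= for the layer 2 protocol and B channel. A condensed userspace sketch of that string assembly (no kernel allocators, the ^SBC/si1 mapping omitted, error handling reduced, input values illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char *phone = "**11";	/* "**" prefix = internal call */
		const char *eazmsn = "12345";
		unsigned proto2 = 1, channel = 0;
		char *dial, type[9], msn[32], proto[9], iso[9];
		size_t l = 3 + strlen(phone);	/* 'D' + number + '\r' + NUL */

		dial = malloc(l);
		if (!dial)
			return 1;
		if (phone[0] == '*' && phone[1] == '*') {
			strcpy(type, "^SCTP=0\r");		/* internal call */
			snprintf(dial, l, "D%s\r", phone + 2);
		} else {
			strcpy(type, "^SCTP=1\r");		/* external call */
			snprintf(dial, l, "D%s\r", phone);
		}
		snprintf(msn, sizeof(msn), "^SMSN=%s\r", eazmsn);
		snprintf(proto, sizeof(proto), "^SBPR=%u\r", proto2);
		snprintf(iso, sizeof(iso), "^SISO=%u\r", channel + 1);

		/* command lengths: 4 8 12 8 8 */
		printf("%zu %zu %zu %zu %zu\n", strlen(dial), strlen(type),
		       strlen(msn), strlen(proto), strlen(iso));
		free(dial);
		return 0;
	}

On the error paths the driver frees every slot of commands[] unconditionally, which is why the array is kzalloc'd and each entry is either NULL or an individually allocated string.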
@@ -186,115 +308,102 @@ static int command_from_LL(isdn_ctrl *cntrl)
 		gig_dbg(DEBUG_CMD, "scheduling DIAL");
 		gigaset_schedule_event(cs);
 		break;
-	case ISDN_CMD_ACCEPTD: //FIXME
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD");
-
-		if (cntrl->arg >= cs->channels) {
+	case ISDN_CMD_ACCEPTD:
+		if (ch >= cs->channels) {
 			dev_err(cs->dev,
-				"ISDN_CMD_ACCEPTD: invalid channel (%d)\n",
-				(int) cntrl->arg);
+				"ISDN_CMD_ACCEPTD: invalid channel (%d)\n", ch);
 			return -EINVAL;
 		}
-
-		if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
-				       EV_ACCEPT, NULL, 0, NULL)) {
-			//FIXME what should we do?
+		bcs = cs->bcs + ch;
+		if (!gigaset_add_event(cs, &bcs->at_state,
+				       EV_ACCEPT, NULL, 0, NULL))
 			return -ENOMEM;
-		}
 
 		gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
 		gigaset_schedule_event(cs);
 
 		break;
 	case ISDN_CMD_ACCEPTB:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB");
 		break;
 	case ISDN_CMD_HANGUP:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_HANGUP (ch: %d)",
-			(int) cntrl->arg);
-
-		if (cntrl->arg >= cs->channels) {
+		if (ch >= cs->channels) {
 			dev_err(cs->dev,
-				"ISDN_CMD_HANGUP: invalid channel (%d)\n",
-				(int) cntrl->arg);
+				"ISDN_CMD_HANGUP: invalid channel (%d)\n", ch);
 			return -EINVAL;
 		}
-
-		if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
-				       EV_HUP, NULL, 0, NULL)) {
-			//FIXME what should we do?
+		bcs = cs->bcs + ch;
+		if (!gigaset_add_event(cs, &bcs->at_state,
+				       EV_HUP, NULL, 0, NULL))
 			return -ENOMEM;
-		}
 
 		gig_dbg(DEBUG_CMD, "scheduling HUP");
 		gigaset_schedule_event(cs);
 
 		break;
-	case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */ //FIXME
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ");
+	case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */
+		dev_info(cs->dev, "ignoring ISDN_CMD_CLREAZ\n");
 		break;
-	case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME
-		gig_dbg(DEBUG_ANY,
-			"ISDN_CMD_SETEAZ (id: %d, ch: %ld, number: %s)",
-			cntrl->driver, cntrl->arg, cntrl->parm.num);
+	case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */
+		dev_info(cs->dev, "ignoring ISDN_CMD_SETEAZ (%s)\n",
+			 cntrl->parm.num);
 		break;
 	case ISDN_CMD_SETL2: /* Set L2 to given protocol */
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (ch: %ld, proto: %lx)",
-			cntrl->arg & 0xff, (cntrl->arg >> 8));
-
-		if ((cntrl->arg & 0xff) >= cs->channels) {
+		if (ch >= cs->channels) {
 			dev_err(cs->dev,
-				"ISDN_CMD_SETL2: invalid channel (%d)\n",
-				(int) cntrl->arg & 0xff);
+				"ISDN_CMD_SETL2: invalid channel (%d)\n", ch);
 			return -EINVAL;
 		}
-
-		if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state,
-				       EV_PROTO_L2, NULL, cntrl->arg >> 8,
-				       NULL)) {
-			//FIXME what should we do?
-			return -ENOMEM;
+		bcs = cs->bcs + ch;
+		if (bcs->chstate & CHS_D_UP) {
+			dev_err(cs->dev,
+				"ISDN_CMD_SETL2: channel active (%d)\n", ch);
+			return -EINVAL;
+		}
+		switch (cntrl->arg >> 8) {
+		case ISDN_PROTO_L2_HDLC:
+			gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_HDLC");
+			bcs->proto2 = L2_HDLC;
+			break;
+		case ISDN_PROTO_L2_TRANS:
+			gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_VOICE");
+			bcs->proto2 = L2_VOICE;
+			break;
+		default:
+			dev_err(cs->dev,
+				"ISDN_CMD_SETL2: unsupported protocol (%lu)\n",
+				cntrl->arg >> 8);
+			return -EINVAL;
 		}
-
-		gig_dbg(DEBUG_CMD, "scheduling PROTO_L2");
-		gigaset_schedule_event(cs);
 		break;
 	case ISDN_CMD_SETL3: /* Set L3 to given protocol */
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (ch: %ld, proto: %lx)",
-			cntrl->arg & 0xff, (cntrl->arg >> 8));
-
-		if ((cntrl->arg & 0xff) >= cs->channels) {
+		if (ch >= cs->channels) {
 			dev_err(cs->dev,
-				"ISDN_CMD_SETL3: invalid channel (%d)\n",
-				(int) cntrl->arg & 0xff);
+				"ISDN_CMD_SETL3: invalid channel (%d)\n", ch);
 			return -EINVAL;
 		}
 
 		if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
 			dev_err(cs->dev,
-				"ISDN_CMD_SETL3: invalid protocol %lu\n",
+				"ISDN_CMD_SETL3: unsupported protocol (%lu)\n",
 				cntrl->arg >> 8);
 			return -EINVAL;
 		}
 
 		break;
 	case ISDN_CMD_PROCEED:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME
+		gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED");
 		break;
 	case ISDN_CMD_ALERT:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
+		gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT");
 		if (cntrl->arg >= cs->channels) {
 			dev_err(cs->dev,
 				"ISDN_CMD_ALERT: invalid channel (%d)\n",
 				(int) cntrl->arg);
 			return -EINVAL;
 		}
-		//bcs = cs->bcs + cntrl->arg;
-		//bcs->proto2 = -1;
-		// FIXME
 		break;
 	case ISDN_CMD_REDIR:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME
+		gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR");
 		break;
 	case ISDN_CMD_PROT_IO:
 		gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
@@ -324,149 +433,34 @@ static int command_from_LL(isdn_ctrl *cntrl)
 	}
 
 	return retval;
+
+oom:
+	dev_err(bcs->cs->dev, "out of memory\n");
+	for (i = 0; i < AT_NUM; ++i)
+		kfree(commands[i]);
+	return -ENOMEM;
 }
 
-void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
+static void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
 {
+	isdn_if *iif = cs->iif;
 	isdn_ctrl command;
 
 	command.driver = cs->myid;
 	command.command = cmd;
 	command.arg = 0;
-	cs->iif.statcallb(&command);
+	iif->statcallb(&command);
 }
 
-void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
+static void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
 {
+	isdn_if *iif = bcs->cs->iif;
 	isdn_ctrl command;
 
 	command.driver = bcs->cs->myid;
 	command.command = cmd;
 	command.arg = bcs->channel;
-	bcs->cs->iif.statcallb(&command);
-}
-
-int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data)
-{
-	struct bc_state *bcs = at_state->bcs;
-	unsigned proto;
-	const char *bc;
-	size_t length[AT_NUM];
-	size_t l;
-	int i;
-	struct setup_parm *sp = data;
-
-	switch (bcs->proto2) {
-	case ISDN_PROTO_L2_HDLC:
-		proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
-		break;
-	case ISDN_PROTO_L2_TRANS:
-		proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
-		break;
-	default:
-		dev_err(bcs->cs->dev, "%s: invalid L2 protocol: %u\n",
-			__func__, bcs->proto2);
-		return -EINVAL;
-	}
-
-	switch (sp->si1) {
-	case 1:		/* audio */
-		bc = "9090A3";	/* 3.1 kHz audio, A-law */
-		break;
-	case 7:		/* data */
-	default:	/* hope the app knows what it is doing */
-		bc = "8890";	/* unrestricted digital information */
-	}
-	//FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC
-
-	length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1;
-	l = strlen(sp->eazmsn);
-	length[AT_MSN  ] = l ? 6 + l + 1 + 1 : 0;
-	length[AT_BC   ] = 5 + strlen(bc) + 1 + 1;
-	length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
-	length[AT_ISO  ] = 6 + 1 + 1 + 1; /* channel: 1 character */
-	length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */
-	length[AT_HLC  ] = 0;
-
-	for (i = 0; i < AT_NUM; ++i) {
-		kfree(bcs->commands[i]);
-		bcs->commands[i] = NULL;
-		if (length[i] &&
-		    !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
-			dev_err(bcs->cs->dev, "out of memory\n");
-			return -ENOMEM;
-		}
-	}
-
-	/* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */
-	if (sp->phone[0] == '*' && sp->phone[1] == '*') {
-		/* internal call: translate ** prefix to CTP value */
-		snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
-			 "D%s\r", sp->phone+2);
-		strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]);
-	} else {
-		snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
-			 "D%s\r", sp->phone);
-		strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]);
-	}
-
-	if (bcs->commands[AT_MSN])
-		snprintf(bcs->commands[AT_MSN], length[AT_MSN],
-			 "^SMSN=%s\r", sp->eazmsn);
-	snprintf(bcs->commands[AT_BC   ], length[AT_BC   ],
-		 "^SBC=%s\r", bc);
-	snprintf(bcs->commands[AT_PROTO], length[AT_PROTO],
-		 "^SBPR=%u\r", proto);
-	snprintf(bcs->commands[AT_ISO  ], length[AT_ISO  ],
-		 "^SISO=%u\r", (unsigned)bcs->channel + 1);
-
-	return 0;
-}
-
-int gigaset_isdn_setup_accept(struct at_state_t *at_state)
-{
-	unsigned proto;
-	size_t length[AT_NUM];
-	int i;
-	struct bc_state *bcs = at_state->bcs;
-
-	switch (bcs->proto2) {
-	case ISDN_PROTO_L2_HDLC:
-		proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
-		break;
-	case ISDN_PROTO_L2_TRANS:
-		proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
-		break;
-	default:
-		dev_err(at_state->cs->dev, "%s: invalid protocol: %u\n",
-			__func__, bcs->proto2);
-		return -EINVAL;
-	}
-
-	length[AT_DIAL ] = 0;
-	length[AT_MSN  ] = 0;
-	length[AT_BC   ] = 0;
-	length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
-	length[AT_ISO  ] = 6 + 1 + 1 + 1; /* channel: 1 character */
-	length[AT_TYPE ] = 0;
-	length[AT_HLC  ] = 0;
-
-	for (i = 0; i < AT_NUM; ++i) {
-		kfree(bcs->commands[i]);
-		bcs->commands[i] = NULL;
-		if (length[i] &&
-		    !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
-			dev_err(at_state->cs->dev, "out of memory\n");
-			return -ENOMEM;
-		}
-	}
-
-	snprintf(bcs->commands[AT_PROTO], length[AT_PROTO],
-		 "^SBPR=%u\r", proto);
-	snprintf(bcs->commands[AT_ISO  ], length[AT_ISO  ],
-		 "^SISO=%u\r", (unsigned) bcs->channel + 1);
-
-	return 0;
+	iif->statcallb(&command);
 }
 
 /**
@@ -482,13 +476,14 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
 {
 	struct cardstate *cs = at_state->cs;
 	struct bc_state *bcs = at_state->bcs;
+	isdn_if *iif = cs->iif;
 	isdn_ctrl response;
 	int retval;
 
 	/* fill ICALL structure */
 	response.parm.setup.si1 = 0;	/* default: unknown */
 	response.parm.setup.si2 = 0;
-	response.parm.setup.screen = 0;	//FIXME how to set these?
+	response.parm.setup.screen = 0;
 	response.parm.setup.plan = 0;
 	if (!at_state->str_var[STR_ZBC]) {
 		/* no BC (internal call): assume speech, A-law */
@@ -509,29 +504,27 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
 		return ICALL_IGNORE;
 	}
 	if (at_state->str_var[STR_NMBR]) {
-		strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
-			sizeof response.parm.setup.phone - 1);
-		response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
+		strlcpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
+			sizeof response.parm.setup.phone);
 	} else
 		response.parm.setup.phone[0] = 0;
 	if (at_state->str_var[STR_ZCPN]) {
-		strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
-			sizeof response.parm.setup.eazmsn - 1);
-		response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
+		strlcpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
+			sizeof response.parm.setup.eazmsn);
 	} else
 		response.parm.setup.eazmsn[0] = 0;
 
 	if (!bcs) {
 		dev_notice(cs->dev, "no channel for incoming call\n");
 		response.command = ISDN_STAT_ICALLW;
-		response.arg = 0; //FIXME
+		response.arg = 0;
 	} else {
 		gig_dbg(DEBUG_CMD, "Sending ICALL");
 		response.command = ISDN_STAT_ICALL;
-		response.arg = bcs->channel; //FIXME
+		response.arg = bcs->channel;
 	}
 	response.driver = cs->myid;
-	retval = cs->iif.statcallb(&response);
+	retval = iif->statcallb(&response);
 	gig_dbg(DEBUG_CMD, "Response: %d", retval);
 	switch (retval) {
 	case 0:	/* no takers */
@@ -560,16 +553,109 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
 	}
 }
 
-/* Set Callback function pointer */
-int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
+/**
+ * gigaset_isdn_connD() - signal D channel connect
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module to notify the LL that the D channel connection has
+ * been established.
+ */
+void gigaset_isdn_connD(struct bc_state *bcs)
 {
-	isdn_if *iif = &cs->iif;
+	gig_dbg(DEBUG_CMD, "sending DCONN");
+	gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
+}
 
-	gig_dbg(DEBUG_ANY, "Register driver capabilities to LL");
+/**
+ * gigaset_isdn_hupD() - signal D channel hangup
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module to notify the LL that the D channel connection has
+ * been shut down.
+ */
+void gigaset_isdn_hupD(struct bc_state *bcs)
+{
+	gig_dbg(DEBUG_CMD, "sending DHUP");
+	gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
+}
+
+/**
+ * gigaset_isdn_connB() - signal B channel connect
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module to notify the LL that the B channel connection has
+ * been established.
+ */
+void gigaset_isdn_connB(struct bc_state *bcs)
+{
+	gig_dbg(DEBUG_CMD, "sending BCONN");
+	gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
+}
+
+/**
+ * gigaset_isdn_hupB() - signal B channel hangup
+ * @bcs:	B channel descriptor structure.
+ *
+ * Called by main module to notify the LL that the B channel connection has
+ * been shut down.
+ */
+void gigaset_isdn_hupB(struct bc_state *bcs)
+{
+	gig_dbg(DEBUG_CMD, "sending BHUP");
+	gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
+}
+
+/**
+ * gigaset_isdn_start() - signal device availability
+ * @cs:		device descriptor structure.
+ *
+ * Called by main module to notify the LL that the device is available for
+ * use.
+ */
+void gigaset_isdn_start(struct cardstate *cs)
+{
+	gig_dbg(DEBUG_CMD, "sending RUN");
+	gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
+}
+
+/**
+ * gigaset_isdn_stop() - signal device unavailability
+ * @cs:		device descriptor structure.
+ *
+ * Called by main module to notify the LL that the device is no longer
+ * available for use.
+ */
+void gigaset_isdn_stop(struct cardstate *cs)
+{
+	gig_dbg(DEBUG_CMD, "sending STOP");
+	gigaset_i4l_cmd(cs, ISDN_STAT_STOP);
+}
+
+/**
+ * gigaset_isdn_register() - register to LL
+ * @cs:		device descriptor structure.
+ * @isdnid:	device name.
+ *
+ * Called by main module to register the device with the LL.
+ *
+ * Return value: 1 for success, 0 for failure
+ */
+int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
+{
+	isdn_if *iif;
+
+	pr_info("ISDN4Linux interface\n");
+
+	iif = kmalloc(sizeof *iif, GFP_KERNEL);
+	if (!iif) {
+		pr_err("out of memory\n");
+		return 0;
+	}
 
 	if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
 	    >= sizeof iif->id) {
 		pr_err("ID too long: %s\n", isdnid);
+		kfree(iif);
 		return 0;
 	}
 
@@ -593,9 +679,26 @@ int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
 
 	if (!register_isdn(iif)) {
 		pr_err("register_isdn failed\n");
+		kfree(iif);
 		return 0;
 	}
 
+	cs->iif = iif;
 	cs->myid = iif->channels;		/* Set my device id */
+	cs->hw_hdr_len = HW_HDR_LEN;
 	return 1;
 }
+
+/**
+ * gigaset_isdn_unregister() - unregister from LL
+ * @cs:		device descriptor structure.
+ *
+ * Called by main module to unregister the device from the LL.
+ */
+void gigaset_isdn_unregister(struct cardstate *cs)
+{
+	gig_dbg(DEBUG_CMD, "sending UNLOAD");
+	gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
+	kfree(cs->iif);
+	cs->iif = NULL;
+}
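
The gigaset_isdn_icall() hunk above replaces the strncpy()-plus-manual-termination idiom with strlcpy(), which copies at most size-1 bytes and always NUL-terminates. A minimal userspace sketch of the difference, using a local re-implementation since strlcpy() is a kernel/BSD routine rather than standard C (buffer sizes and the sample number string are illustrative only):

#include <stdio.h>
#include <string.h>

/* simplified re-implementation of the kernel's strlcpy() semantics:
 * copy at most size-1 bytes, always terminate, and return strlen(src)
 * so the caller can detect truncation */
static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len < size - 1) ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char with_strncpy[8], with_strlcpy[8];
	const char *nmbr = "01234567890";	/* longer than either buffer */

	/* strncpy() does NOT terminate the destination on overflow, which is
	 * why the old code had to clear the last byte by hand */
	strncpy(with_strncpy, nmbr, sizeof(with_strncpy));
	with_strncpy[sizeof(with_strncpy) - 1] = '\0';

	/* strlcpy() terminates unconditionally; no manual fix-up needed */
	strlcpy_sketch(with_strlcpy, nmbr, sizeof(with_strlcpy));

	printf("%s / %s\n", with_strncpy, with_strlcpy);  /* 0123456 / 0123456 */
	return 0;
}
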
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 6a8e1384e7bd..d2260b0055fc 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -162,7 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
 		return -ENODEV;
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 	tty->driver_data = cs;
 
 	++cs->open_count;
@@ -171,7 +171,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
 		spin_lock_irqsave(&cs->lock, flags);
 		cs->tty = tty;
 		spin_unlock_irqrestore(&cs->lock, flags);
-		tty->low_latency = 1; //FIXME test
+		tty->low_latency = 1;
 	}
 
 	mutex_unlock(&cs->mutex);
@@ -228,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
 	gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -299,9 +299,8 @@ static int if_tiocmget(struct tty_struct *tty, struct file *file)
 	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
-	// FIXME read from device?
 	retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
 
 	mutex_unlock(&cs->mutex);
@@ -326,7 +325,7 @@ static int if_tiocmset(struct tty_struct *tty, struct file *file,
 		cs->minor_index, __func__, set, clear);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -356,7 +355,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
 	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -390,7 +389,7 @@ static int if_write_room(struct tty_struct *tty)
 	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -455,9 +454,8 @@ static void if_throttle(struct tty_struct *tty)
 		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
 	else if (!cs->open_count)
 		dev_warn(cs->dev, "%s: device not opened\n", __func__);
-	else {
-		//FIXME
-	}
+	else
+		gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
 
 	mutex_unlock(&cs->mutex);
 }
@@ -480,9 +478,8 @@ static void if_unthrottle(struct tty_struct *tty)
 		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
 	else if (!cs->open_count)
 		dev_warn(cs->dev, "%s: device not opened\n", __func__);
-	else {
-		//FIXME
-	}
+	else
+		gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
 
 	mutex_unlock(&cs->mutex);
 }
@@ -515,10 +512,9 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
 		goto out;
 	}
 
-	// stolen from mct_u232.c
 	iflag = tty->termios->c_iflag;
 	cflag = tty->termios->c_cflag;
-	old_cflag = old ? old->c_cflag : cflag; //FIXME?
+	old_cflag = old ? old->c_cflag : cflag;
 	gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
 		cs->minor_index, iflag, cflag, old_cflag);
 
@@ -588,7 +584,7 @@ void gigaset_if_init(struct cardstate *cs)
 	if (!drv->have_tty)
 		return;
 
-	tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs);
+	tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
 
 	mutex_lock(&cs->mutex);
 	cs->tty_dev = tty_register_device(drv->tty, cs->minor_index, NULL);
@@ -632,7 +628,8 @@ void gigaset_if_receive(struct cardstate *cs,
 	struct tty_struct *tty;
 
 	spin_lock_irqsave(&cs->lock, flags);
-	if ((tty = cs->tty) == NULL)
+	tty = cs->tty;
+	if (tty == NULL)
 		gig_dbg(DEBUG_ANY, "receive on closed device");
 	else {
 		tty_buffer_request_room(tty, len);
@@ -659,9 +656,9 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
 
 	drv->have_tty = 0;
 
-	if ((drv->tty = alloc_tty_driver(minors)) == NULL)
+	drv->tty = tty = alloc_tty_driver(minors);
+	if (tty == NULL)
 		goto enomem;
-	tty = drv->tty;
 
 	tty->magic =		TTY_DRIVER_MAGIC,
 	tty->major =		GIG_MAJOR,
@@ -676,8 +673,8 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
 
 	tty->owner =		THIS_MODULE;
 
-	tty->init_termios          = tty_std_termios; //FIXME
-	tty->init_termios.c_cflag  = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME
+	tty->init_termios          = tty_std_termios;
+	tty->init_termios.c_cflag  = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
 	tty_set_operations(tty, &if_ops);
 
 	ret = tty_register_driver(tty);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 9f3ef7b4248c..85394a6ebae8 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -41,7 +41,8 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
 
 	read = iwb->read;
 	write = iwb->write;
-	if ((freebytes = read - write) > 0) {
+	freebytes = read - write;
+	if (freebytes > 0) {
 		/* no wraparound: need padding space within regular area */
 		return freebytes - BAS_OUTBUFPAD;
 	} else if (read < BAS_OUTBUFPAD) {
@@ -53,29 +54,6 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
 	}
 }
 
-/* compare two offsets within the buffer
- * The buffer is seen as circular, with the read position as start
- * returns -1/0/1 if position a </=/> position b without crossing 'read'
- */
-static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
-{
-	int read;
-	if (a == b)
-		return 0;
-	read = iwb->read;
-	if (a < b) {
-		if (a < read && read <= b)
-			return +1;
-		else
-			return -1;
-	} else {
-		if (b < read && read <= a)
-			return -1;
-		else
-			return +1;
-	}
-}
-
 /* start writing
  * acquire the write semaphore
  * return true if acquired, false if busy
@@ -271,7 +249,7 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
  *        bit 14..13 = number of bits added by stuffing
  */
 static const u16 stufftab[5 * 256] = {
-// previous 1s = 0:
+/* previous 1s = 0: */
  0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
  0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
  0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
@@ -289,7 +267,7 @@ static const u16 stufftab[5 * 256] = {
  0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
  0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
 
-// previous 1s = 1:
+/* previous 1s = 1: */
  0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
  0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
  0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
@@ -307,7 +285,7 @@ static const u16 stufftab[5 * 256] = {
  0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
  0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
 
-// previous 1s = 2:
+/* previous 1s = 2: */
  0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
  0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
  0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
@@ -325,7 +303,7 @@ static const u16 stufftab[5 * 256] = {
  0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
  0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
 
-// previous 1s = 3:
+/* previous 1s = 3: */
  0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
  0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
  0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
@@ -343,7 +321,7 @@ static const u16 stufftab[5 * 256] = {
  0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
  0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
 
-// previous 1s = 4:
+/* previous 1s = 4: */
  0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
  0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
  0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
@@ -367,7 +345,8 @@ static const u16 stufftab[5 * 256] = {
  * parameters:
  *	cin	input byte
  *	ones	number of trailing '1' bits in result before this step
- *	iwb	pointer to output buffer structure (write semaphore must be held)
+ *	iwb	pointer to output buffer structure
+ *		(write semaphore must be held)
  * return value:
  *	number of trailing '1' bits in result after this step
  */
@@ -408,7 +387,8 @@ static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
  * parameters:
  *	in	input buffer
  *	count	number of bytes in input buffer
- *	iwb	pointer to output buffer structure (write semaphore must be held)
+ *	iwb	pointer to output buffer structure
+ *		(write semaphore must be held)
  * return value:
  *	position of end of packet in output buffer on success,
  *	-EAGAIN if write semaphore busy or buffer full
@@ -440,7 +420,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
 		fcs = crc_ccitt_byte(fcs, c);
 	}
 
-	/* bitstuff and append FCS (complemented, least significant byte first) */
+	/* bitstuff and append FCS
+	 * (complemented, least significant byte first) */
 	fcs ^= 0xffff;
 	ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
 	ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
@@ -459,7 +440,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
  * parameters:
  *	in	input buffer
  *	count	number of bytes in input buffer
- *	iwb	pointer to output buffer structure (write semaphore must be held)
+ *	iwb	pointer to output buffer structure
+ *		(write semaphore must be held)
  * return value:
  *	position of end of packet in output buffer on success,
  *	-EAGAIN if write semaphore busy or buffer full
@@ -500,7 +482,7 @@ int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
 	int result;
 
 	switch (bcs->proto2) {
-	case ISDN_PROTO_L2_HDLC:
+	case L2_HDLC:
 		result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
 		gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
 			__func__, len, result);
@@ -542,8 +524,9 @@ static inline void hdlc_flush(struct bc_state *bcs)
 	if (likely(bcs->skb != NULL))
 		skb_trim(bcs->skb, 0);
 	else if (!bcs->ignore) {
-		if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
-			skb_reserve(bcs->skb, HW_HDR_LEN);
+		bcs->skb = dev_alloc_skb(SBUFSIZE + bcs->cs->hw_hdr_len);
+		if (bcs->skb)
+			skb_reserve(bcs->skb, bcs->cs->hw_hdr_len);
 		else
 			dev_err(bcs->cs->dev, "could not allocate skb\n");
 	}
@@ -557,43 +540,46 @@ static inline void hdlc_flush(struct bc_state *bcs)
  */
 static inline void hdlc_done(struct bc_state *bcs)
 {
+	struct cardstate *cs = bcs->cs;
 	struct sk_buff *procskb;
+	unsigned int len;
 
 	if (unlikely(bcs->ignore)) {
 		bcs->ignore--;
 		hdlc_flush(bcs);
 		return;
 	}
-
-	if ((procskb = bcs->skb) == NULL) {
+	procskb = bcs->skb;
+	if (procskb == NULL) {
 		/* previous error */
 		gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
-		gigaset_rcv_error(NULL, bcs->cs, bcs);
+		gigaset_isdn_rcv_err(bcs);
 	} else if (procskb->len < 2) {
-		dev_notice(bcs->cs->dev, "received short frame (%d octets)\n",
+		dev_notice(cs->dev, "received short frame (%d octets)\n",
 			   procskb->len);
 		bcs->hw.bas->runts++;
-		gigaset_rcv_error(procskb, bcs->cs, bcs);
+		dev_kfree_skb_any(procskb);
+		gigaset_isdn_rcv_err(bcs);
 	} else if (bcs->fcs != PPP_GOODFCS) {
-		dev_notice(bcs->cs->dev, "frame check error (0x%04x)\n",
-			   bcs->fcs);
+		dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs);
 		bcs->hw.bas->fcserrs++;
-		gigaset_rcv_error(procskb, bcs->cs, bcs);
+		dev_kfree_skb_any(procskb);
+		gigaset_isdn_rcv_err(bcs);
 	} else {
-		procskb->len -= 2;		/* subtract FCS */
-		procskb->tail -= 2;
-		gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)",
-			__func__, procskb->len);
+		len = procskb->len;
+		__skb_trim(procskb, len -= 2);	/* subtract FCS */
+		gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
 		dump_bytes(DEBUG_STREAM_DUMP,
-			   "rcv data", procskb->data, procskb->len);
-		bcs->hw.bas->goodbytes += procskb->len;
-		gigaset_rcv_skb(procskb, bcs->cs, bcs);
+			   "rcv data", procskb->data, len);
+		bcs->hw.bas->goodbytes += len;
+		gigaset_skb_rcvd(bcs, procskb);
 	}
 
-	if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
-		skb_reserve(bcs->skb, HW_HDR_LEN);
+	bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
+	if (bcs->skb)
+		skb_reserve(bcs->skb, cs->hw_hdr_len);
 	else
-		dev_err(bcs->cs->dev, "could not allocate skb\n");
+		dev_err(cs->dev, "could not allocate skb\n");
 	bcs->fcs = PPP_INITFCS;
 }
 
@@ -610,12 +596,8 @@ static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
 
 	dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
 	bcs->hw.bas->alignerrs++;
-	gigaset_rcv_error(bcs->skb, bcs->cs, bcs);
-
-	if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
-		skb_reserve(bcs->skb, HW_HDR_LEN);
-	else
-		dev_err(bcs->cs->dev, "could not allocate skb\n");
+	gigaset_isdn_rcv_err(bcs);
+	__skb_trim(bcs->skb, 0);
 	bcs->fcs = PPP_INITFCS;
 }
 
@@ -646,10 +628,10 @@ static const unsigned char bitcounts[256] = {
 };
 
 /* hdlc_unpack
- * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation)
- * on a sequence of received data bytes (8 bits each, LSB first)
- * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb
- * notify of errors via gigaset_rcv_error
+ * perform HDLC frame processing (bit unstuffing, flag detection, FCS
+ * calculation) on a sequence of received data bytes (8 bits each, LSB first)
+ * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
+ * notify of errors via gigaset_isdn_rcv_err
  * tally frames, errors etc. in BC structure counters
  * parameters:
  *	src	received data
@@ -665,9 +647,12 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 
 	/* load previous state:
 	 * inputstate = set of flag bits:
-	 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort
-	 * - INS_have_data: at least one complete data byte received since last flag
-	 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7)
+	 * - INS_flag_hunt: no complete opening flag received since connection
+	 *                  setup or last abort
+	 * - INS_have_data: at least one complete data byte received since last
+	 *                  flag
+	 * seqlen = number of consecutive '1' bits in last 7 input stream bits
+	 *          (0..7)
 	 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
 	 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
 	 */
@@ -701,9 +686,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 				inbyte = c >> (lead1 + 1);
 				inbits = 7 - lead1;
 				if (trail1 >= 8) {
-					/* interior stuffing: omitting the MSB handles most cases */
+					/* interior stuffing:
+					 * omitting the MSB handles most cases,
+					 * correct the incorrectly handled
+					 * cases individually */
 					inbits--;
-					/* correct the incorrectly handled cases individually */
 					switch (c) {
 					case 0xbe:
 						inbyte = 0x3f;
@@ -729,13 +716,14 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 			hdlc_flush(bcs);
 			inputstate |= INS_flag_hunt;
 		} else if (seqlen == 6) {
-			/* closing flag, including (6 - lead1) '1's and one '0' from inbits */
+			/* closing flag, including (6 - lead1) '1's
+			 * and one '0' from inbits */
 			if (inbits > 7 - lead1) {
 				hdlc_frag(bcs, inbits + lead1 - 7);
 				inputstate &= ~INS_have_data;
 			} else {
 				if (inbits < 7 - lead1)
-					ubc->stolen0s ++;
+					ubc->stolen0s++;
 				if (inputstate & INS_have_data) {
 					hdlc_done(bcs);
 					inputstate &= ~INS_have_data;
@@ -744,7 +732,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 
 			if (c == PPP_FLAG) {
 				/* complete flag, LSB overlaps preceding flag */
-				ubc->shared0s ++;
+				ubc->shared0s++;
 				inbits = 0;
 				inbyte = 0;
 			} else if (trail1 != 7) {
@@ -752,9 +740,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 				inbyte = c >> (lead1 + 1);
 				inbits = 7 - lead1;
 				if (trail1 >= 8) {
-					/* interior stuffing: omitting the MSB handles most cases */
+					/* interior stuffing:
+					 * omitting the MSB handles most cases,
+					 * correct the incorrectly handled
+					 * cases individually */
 					inbits--;
-					/* correct the incorrectly handled cases individually */
 					switch (c) {
 					case 0xbe:
 						inbyte = 0x3f;
@@ -762,7 +752,8 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 					}
 				}
 			} else {
-				/* abort sequence follows, skb already empty anyway */
+				/* abort sequence follows,
+				 * skb already empty anyway */
 				ubc->aborts++;
 				inputstate |= INS_flag_hunt;
 			}
@@ -787,14 +778,17 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 			} else {
 				/* stuffed data */
 				if (trail1 < 7) { /* => seqlen == 5 */
-					/* stuff bit at position lead1, no interior stuffing */
+					/* stuff bit at position lead1,
+					 * no interior stuffing */
 					unsigned char mask = (1 << lead1) - 1;
 					c = (c & mask) | ((c & ~mask) >> 1);
 					inbyte |= c << inbits;
 					inbits += 7;
 				} else if (seqlen < 5) { /* trail1 >= 8 */
-					/* interior stuffing: omitting the MSB handles most cases */
-					/* correct the incorrectly handled cases individually */
+					/* interior stuffing:
+					 * omitting the MSB handles most cases,
+					 * correct the incorrectly handled
+					 * cases individually */
 					switch (c) {
 					case 0xbe:
 						c = 0x7e;
@@ -804,8 +798,9 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 					inbits += 7;
 				} else { /* seqlen == 5 && trail1 >= 8 */
 
-					/* stuff bit at lead1 *and* interior stuffing */
-					switch (c) {	/* unstuff individually */
+					/* stuff bit at lead1 *and* interior
+					 * stuffing -- unstuff individually */
+					switch (c) {
 					case 0x7d:
 						c = 0x3f;
 						break;
@@ -841,7 +836,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 }
 
 /* trans_receive
- * pass on received USB frame transparently as SKB via gigaset_rcv_skb
+ * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
  * invert bytes
  * tally frames, errors etc. in BC structure counters
  * parameters:
@@ -852,6 +847,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
 static inline void trans_receive(unsigned char *src, unsigned count,
 				 struct bc_state *bcs)
 {
+	struct cardstate *cs = bcs->cs;
 	struct sk_buff *skb;
 	int dobytes;
 	unsigned char *dst;
@@ -861,13 +857,14 @@ static inline void trans_receive(unsigned char *src, unsigned count,
 		hdlc_flush(bcs);
 		return;
 	}
-	if (unlikely((skb = bcs->skb) == NULL)) {
-		bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
+	skb = bcs->skb;
+	if (unlikely(skb == NULL)) {
+		bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
 		if (!skb) {
-			dev_err(bcs->cs->dev, "could not allocate skb\n");
+			dev_err(cs->dev, "could not allocate skb\n");
 			return;
 		}
-		skb_reserve(skb, HW_HDR_LEN);
+		skb_reserve(skb, cs->hw_hdr_len);
 	}
 	bcs->hw.bas->goodbytes += skb->len;
 	dobytes = TRANSBUFSIZE - skb->len;
@@ -881,23 +878,24 @@ static inline void trans_receive(unsigned char *src, unsigned count,
 		if (dobytes == 0) {
 			dump_bytes(DEBUG_STREAM_DUMP,
 				   "rcv data", skb->data, skb->len);
-			gigaset_rcv_skb(skb, bcs->cs, bcs);
-			bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
+			gigaset_skb_rcvd(bcs, skb);
+			bcs->skb = skb =
+				dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
 			if (!skb) {
-				dev_err(bcs->cs->dev,
-					"could not allocate skb\n");
+				dev_err(cs->dev, "could not allocate skb\n");
 				return;
 			}
-			skb_reserve(bcs->skb, HW_HDR_LEN);
+			skb_reserve(skb, cs->hw_hdr_len);
 			dobytes = TRANSBUFSIZE;
 		}
 	}
 }
 
-void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs)
+void gigaset_isoc_receive(unsigned char *src, unsigned count,
+			  struct bc_state *bcs)
 {
 	switch (bcs->proto2) {
-	case ISDN_PROTO_L2_HDLC:
+	case L2_HDLC:
 		hdlc_unpack(src, count, bcs);
 		break;
 	default:		/* assume transparent */
@@ -981,8 +979,10 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
  * @bcs:	B channel descriptor structure.
  * @skb:	data to send.
  *
- * Called by i4l.c to queue an skb for sending, and start transmission if
+ * Called by LL to queue an skb for sending, and start transmission if
  * necessary.
+ * Once the payload data has been transmitted completely, gigaset_skb_sent()
+ * will be called with the skb's link layer header preserved.
  *
  * Return value:
  *	number of bytes accepted for sending (skb->len) if ok,
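
In the hunks above, hdlc_buildframe() appends the complemented FCS least-significant byte first, and hdlc_done() accepts a frame only when the running CRC over payload plus FCS equals PPP_GOODFCS. A standalone sketch of that check, using a bit-wise re-implementation of crc_ccitt_byte() (the kernel's version is table-driven but computes the same function; the payload bytes are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define PPP_INITFCS	0xffff	/* initial FCS value (RFC 1662) */
#define PPP_GOODFCS	0xf0b8	/* good final FCS residue (RFC 1662) */

/* bit-wise CRC-CCITT, reflected polynomial 0x8408, one byte at a time */
static uint16_t crc_ccitt_byte_sketch(uint16_t fcs, uint8_t c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	return fcs;
}

int main(void)
{
	uint8_t frame[6] = { 0x01, 0x02, 0x03, 0x04 };	/* 4 payload + 2 FCS bytes */
	uint16_t fcs = PPP_INITFCS;
	int i;

	/* transmit side, as in hdlc_buildframe(): CRC the payload, then
	 * append the complemented FCS, least significant byte first */
	for (i = 0; i < 4; i++)
		fcs = crc_ccitt_byte_sketch(fcs, frame[i]);
	fcs ^= 0xffff;
	frame[4] = fcs & 0xff;
	frame[5] = fcs >> 8;

	/* receive side, as in hdlc_done(): run the CRC over payload plus FCS
	 * and expect the fixed "good FCS" residue */
	fcs = PPP_INITFCS;
	for (i = 0; i < 6; i++)
		fcs = crc_ccitt_byte_sketch(fcs, frame[i]);
	printf("%s\n", fcs == PPP_GOODFCS ? "frame check ok" : "frame check error");
	return 0;
}
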
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 9715aad9c3f0..758a00c1d2e2 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -39,7 +39,7 @@ static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
 			return -EINVAL;
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	cs->waiting = 1;
 	if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 3071a52467ed..168d585d64d8 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -164,9 +164,15 @@ static void gigaset_modem_fill(unsigned long data)
 {
 	struct cardstate *cs = (struct cardstate *) data;
 	struct bc_state *bcs;
+	struct sk_buff *nextskb;
 	int sent = 0;
 
-	if (!cs || !(bcs = cs->bcs)) {
+	if (!cs) {
+		gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
+		return;
+	}
+	bcs = cs->bcs;
+	if (!bcs) {
 		gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
 		return;
 	}
@@ -179,9 +185,11 @@ static void gigaset_modem_fill(unsigned long data)
 			return;
 
 		/* no command to send; get skb */
-		if (!(bcs->tx_skb = skb_dequeue(&bcs->squeue)))
+		nextskb = skb_dequeue(&bcs->squeue);
+		if (!nextskb)
 			/* no skb either, nothing to do */
 			return;
+		bcs->tx_skb = nextskb;
 
 		gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
 			(unsigned long) bcs->tx_skb);
@@ -236,19 +244,20 @@ static void flush_send_queue(struct cardstate *cs)
  *	number of bytes queued, or error code < 0
  */
 static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
-                             int len, struct tasklet_struct *wake_tasklet)
+			     int len, struct tasklet_struct *wake_tasklet)
 {
 	struct cmdbuf_t *cb;
 	unsigned long flags;
 
 	gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
-	                     DEBUG_TRANSCMD : DEBUG_LOCKCMD,
-	                   "CMD Transmit", len, buf);
+				DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+			   "CMD Transmit", len, buf);
 
 	if (len <= 0)
 		return 0;
 
-	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+	cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
+	if (!cb) {
 		dev_err(cs->dev, "%s: out of memory!\n", __func__);
 		return -ENOMEM;
 	}
@@ -392,7 +401,6 @@ static void gigaset_device_release(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 
 	/* adapted from platform_device_release() in drivers/base/platform.c */
-	//FIXME is this actually necessary?
 	kfree(dev->platform_data);
 	kfree(pdev->resource);
 }
@@ -404,16 +412,20 @@ static void gigaset_device_release(struct device *dev)
 static int gigaset_initcshw(struct cardstate *cs)
 {
 	int rc;
+	struct ser_cardstate *scs;
 
-	if (!(cs->hw.ser = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL))) {
+	scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
+	if (!scs) {
 		pr_err("out of memory\n");
 		return 0;
 	}
+	cs->hw.ser = scs;
 
 	cs->hw.ser->dev.name = GIGASET_MODULENAME;
 	cs->hw.ser->dev.id = cs->minor_index;
 	cs->hw.ser->dev.dev.release = gigaset_device_release;
-	if ((rc = platform_device_register(&cs->hw.ser->dev)) != 0) {
+	rc = platform_device_register(&cs->hw.ser->dev);
+	if (rc != 0) {
 		pr_err("error %d registering platform device\n", rc);
 		kfree(cs->hw.ser);
 		cs->hw.ser = NULL;
@@ -422,7 +434,7 @@ static int gigaset_initcshw(struct cardstate *cs)
 	dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
 
 	tasklet_init(&cs->write_tasklet,
-	             &gigaset_modem_fill, (unsigned long) cs);
+		     gigaset_modem_fill, (unsigned long) cs);
 	return 1;
 }
 
@@ -434,7 +446,8 @@ static int gigaset_initcshw(struct cardstate *cs)
  * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
  * and by "if_lock" and "if_termios" in interface.c
  */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state)
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+				  unsigned new_state)
 {
 	struct tty_struct *tty = cs->hw.ser->tty;
 	unsigned int set, clear;
@@ -520,8 +533,8 @@ gigaset_tty_open(struct tty_struct *tty)
 	}
 
 	/* allocate memory for our device state and initialize it */
-	if (!(cs = gigaset_initcs(driver, 1, 1, 0, cidmode,
-				  GIGASET_MODULENAME)))
+	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+	if (!cs)
 		goto error;
 
 	cs->dev = &cs->hw.ser->dev.dev;
@@ -690,7 +703,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 
 	if (!cs)
 		return;
-	if (!(inbuf = cs->inbuf)) {
+	inbuf = cs->inbuf;
+	if (!inbuf) {
 		dev_err(cs->dev, "%s: no inbuf\n", __func__);
 		cs_put(cs);
 		return;
@@ -770,18 +784,21 @@ static int __init ser_gigaset_init(void)
 	int rc;
 
 	gig_dbg(DEBUG_INIT, "%s", __func__);
-	if ((rc = platform_driver_register(&device_driver)) != 0) {
+	rc = platform_driver_register(&device_driver);
+	if (rc != 0) {
 		pr_err("error %d registering platform driver\n", rc);
 		return rc;
 	}
 
 	/* allocate memory for our driver state and initialize it */
-	if (!(driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 					  GIGASET_MODULENAME, GIGASET_DEVNAME,
-					  &ops, THIS_MODULE)))
+					  &ops, THIS_MODULE);
+	if (!driver)
 		goto error;
 
-	if ((rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc)) != 0) {
+	rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
+	if (rc != 0) {
 		pr_err("error %d registering line discipline\n", rc);
 		goto error;
 	}
@@ -808,7 +825,8 @@ static void __exit ser_gigaset_exit(void)
 		driver = NULL;
 	}
 
-	if ((rc = tty_unregister_ldisc(N_GIGASET_M101)) != 0)
+	rc = tty_unregister_ldisc(N_GIGASET_M101);
+	if (rc != 0)
 		pr_err("error %d unregistering line discipline\n", rc);
 
 	platform_driver_unregister(&device_driver);
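
gigaset_write_cmd() above allocates each command as a single kmalloc(sizeof(struct cmdbuf_t) + len), i.e. header and payload in one block. A small userspace sketch of that header-plus-trailing-payload pattern (the struct layout here is hypothetical and for illustration only; the real cmdbuf_t is defined in gigaset.h):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for struct cmdbuf_t: a fixed header followed by
 * a flexible payload area allocated in the same block */
struct cmdbuf_sketch {
	int len;
	int offset;
	unsigned char buf[];	/* flexible array member */
};

static struct cmdbuf_sketch *cmdbuf_alloc(const unsigned char *data, int len)
{
	struct cmdbuf_sketch *cb = malloc(sizeof(*cb) + len);

	if (!cb)
		return NULL;
	cb->len = len;
	cb->offset = 0;
	memcpy(cb->buf, data, len);	/* payload sits right after the header */
	return cb;
}

int main(void)
{
	const unsigned char cmd[] = "AT\r";
	struct cmdbuf_sketch *cb = cmdbuf_alloc(cmd, sizeof(cmd) - 1);

	if (!cb)
		return 1;
	printf("queued %d byte(s)\n", cb->len);
	free(cb);	/* one free() releases header and payload together */
	return 0;
}
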
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4deb1ab0dbf8..3ab1daeb276b 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -43,14 +43,14 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
 #define GIGASET_MODULENAME "usb_gigaset"
 #define GIGASET_DEVNAME    "ttyGU"
 
-#define IF_WRITEBUF 2000 //FIXME  // WAKEUP_CHARS: 256
+#define IF_WRITEBUF 2000	/* arbitrary limit */
 
 /* Values for the Gigaset M105 Data */
 #define USB_M105_VENDOR_ID	0x0681
 #define USB_M105_PRODUCT_ID	0x0009
 
 /* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table [] = {
+static const struct usb_device_id gigaset_table[] = {
 	{ USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
 	{ }					/* Terminating entry */
 };
@@ -97,8 +97,8 @@ MODULE_DEVICE_TABLE(usb, gigaset_table);
  *       41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
  *            Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
  *            xx is usually 0x00 but was 0x7e before starting data transfer
- *            in unimodem mode. So, this might be an array of characters that need
- *            special treatment ("commit all bufferd data"?), 11=^Q, 13=^S.
+ *            in unimodem mode. So, this might be an array of characters that
+ *            need special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
  *
  * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
  * flags per packet.
@@ -114,7 +114,7 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
 static int gigaset_resume(struct usb_interface *intf);
 static int gigaset_pre_reset(struct usb_interface *intf);
 
-static struct gigaset_driver *driver = NULL;
+static struct gigaset_driver *driver;
 
 /* usb specific object needed to register this driver with the usb subsystem */
 static struct usb_driver gigaset_usb_driver = {
@@ -141,6 +141,7 @@ struct usb_cardstate {
 	struct urb		*bulk_out_urb;
 
 	/* Input buffer */
+	unsigned char		*rcvbuf;
 	int			rcvbuf_size;
 	struct urb		*read_urb;
 	__u8			int_in_endpointAddr;
@@ -164,13 +165,11 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
 	val = tiocm_to_gigaset(new_state);
 
 	gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
-	// don't use this in an interrupt/BH
 	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
 			    (val & 0xff) | ((mask & 0xff) << 8), 0,
 			    NULL, 0, 2000 /* timeout? */);
 	if (r < 0)
 		return r;
-	//..
 	return 0;
 }
 
@@ -220,7 +219,6 @@ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
 	cflag &= CBAUD;
 
 	switch (cflag) {
-	//FIXME more values?
 	case    B300: rate =     300; break;
 	case    B600: rate =     600; break;
 	case   B1200: rate =    1200; break;
@@ -273,7 +271,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
 	/* set the number of stop bits */
 	if (cflag & CSTOPB) {
 		if ((cflag & CSIZE) == CS5)
-			val |= 1; /* 1.5 stop bits */ //FIXME is this okay?
+			val |= 1; /* 1.5 stop bits */
 		else
 			val |= 2; /* 2 stop bits */
 	}
@@ -282,7 +280,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
 }
 
 
- /*================================================================================================================*/
+/*============================================================================*/
 static int gigaset_init_bchannel(struct bc_state *bcs)
 {
 	/* nothing to do for M10x */
@@ -344,7 +342,6 @@ static void gigaset_modem_fill(unsigned long data)
 			if (write_modem(cs) < 0) {
 				gig_dbg(DEBUG_OUTPUT,
 					"modem_fill: write_modem failed");
-				// FIXME should we tell the LL?
 				again = 1; /* no callback will be called! */
 			}
 		}
@@ -356,8 +353,8 @@ static void gigaset_modem_fill(unsigned long data)
  */
 static void gigaset_read_int_callback(struct urb *urb)
 {
-	struct inbuf_t *inbuf = urb->context;
-	struct cardstate *cs = inbuf->cs;
+	struct cardstate *cs = urb->context;
+	struct inbuf_t *inbuf = cs->inbuf;
 	int status = urb->status;
 	int r;
 	unsigned numbytes;
@@ -368,7 +365,7 @@ static void gigaset_read_int_callback(struct urb *urb)
 		numbytes = urb->actual_length;
 
 		if (numbytes) {
-			src = inbuf->rcvbuf;
+			src = cs->hw.usb->rcvbuf;
 			if (unlikely(*src))
 				dev_warn(cs->dev,
 				    "%s: There was no leading 0, but 0x%02x!\n",
@@ -440,7 +437,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
 	struct cmdbuf_t *tcb;
 	unsigned long flags;
 	int count;
-	int status = -ENOENT; // FIXME
+	int status = -ENOENT;
 	struct usb_cardstate *ucs = cs->hw.usb;
 
 	do {
@@ -480,7 +477,9 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
 			ucs->busy = 1;
 
 			spin_lock_irqsave(&cs->lock, flags);
-			status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
+			status = cs->connected ?
+				usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
+				-ENODEV;
 			spin_unlock_irqrestore(&cs->lock, flags);
 
 			if (status) {
@@ -510,8 +509,8 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
 
 	if (len <= 0)
 		return 0;
-
-	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+	cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
+	if (!cb) {
 		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		return -ENOMEM;
 	}
@@ -615,7 +614,7 @@ static int gigaset_initcshw(struct cardstate *cs)
 	ucs->bulk_out_urb = NULL;
 	ucs->read_urb = NULL;
 	tasklet_init(&cs->write_tasklet,
-		     &gigaset_modem_fill, (unsigned long) cs);
+		     gigaset_modem_fill, (unsigned long) cs);
 
 	return 1;
 }
@@ -637,9 +636,7 @@ static int write_modem(struct cardstate *cs)
 		return -EINVAL;
 	}
 
-	/* Copy data to bulk out buffer and  // FIXME copying not necessary
-	 * transmit data
-	 */
+	/* Copy data to bulk out buffer and transmit data */
 	count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
 	skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
 	skb_pull(bcs->tx_skb, count);
@@ -650,7 +647,8 @@ static int write_modem(struct cardstate *cs)
 	if (cs->connected) {
 		usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
 				  usb_sndbulkpipe(ucs->udev,
-						  ucs->bulk_out_endpointAddr & 0x0f),
+						  ucs->bulk_out_endpointAddr &
+						  0x0f),
 				  ucs->bulk_out_buffer, count,
 				  gigaset_write_bulk_callback, cs);
 		ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
@@ -666,7 +664,7 @@ static int write_modem(struct cardstate *cs)
 
 	if (!bcs->tx_skb->len) {
 		/* skb sent completely */
-		gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0?
+		gigaset_skb_sent(bcs, bcs->tx_skb);
 
 		gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
 			(unsigned long) bcs->tx_skb);
@@ -763,8 +761,8 @@ static int gigaset_probe(struct usb_interface *interface,
 	buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
 	ucs->rcvbuf_size = buffer_size;
 	ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
-	cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
-	if (!cs->inbuf[0].rcvbuf) {
+	ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!ucs->rcvbuf) {
 		dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
 		retval = -ENOMEM;
 		goto error;
@@ -773,9 +771,9 @@ static int gigaset_probe(struct usb_interface *interface,
 	usb_fill_int_urb(ucs->read_urb, udev,
 			 usb_rcvintpipe(udev,
 					endpoint->bEndpointAddress & 0x0f),
-			 cs->inbuf[0].rcvbuf, buffer_size,
+			 ucs->rcvbuf, buffer_size,
 			 gigaset_read_int_callback,
-			 cs->inbuf + 0, endpoint->bInterval);
+			 cs, endpoint->bInterval);
 
 	retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
 	if (retval) {
@@ -789,7 +787,7 @@ static int gigaset_probe(struct usb_interface *interface,
 
 	if (!gigaset_start(cs)) {
 		tasklet_kill(&cs->write_tasklet);
-		retval = -ENODEV; //FIXME
+		retval = -ENODEV;
 		goto error;
 	}
 	return 0;
@@ -798,11 +796,11 @@ error:
 	usb_kill_urb(ucs->read_urb);
 	kfree(ucs->bulk_out_buffer);
 	usb_free_urb(ucs->bulk_out_urb);
-	kfree(cs->inbuf[0].rcvbuf);
+	kfree(ucs->rcvbuf);
 	usb_free_urb(ucs->read_urb);
 	usb_set_intfdata(interface, NULL);
 	ucs->read_urb = ucs->bulk_out_urb = NULL;
-	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
+	ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
 	usb_put_dev(ucs->udev);
 	ucs->udev = NULL;
 	ucs->interface = NULL;
@@ -831,10 +829,10 @@ static void gigaset_disconnect(struct usb_interface *interface)
 
 	kfree(ucs->bulk_out_buffer);
 	usb_free_urb(ucs->bulk_out_urb);
-	kfree(cs->inbuf[0].rcvbuf);
+	kfree(ucs->rcvbuf);
 	usb_free_urb(ucs->read_urb);
 	ucs->read_urb = ucs->bulk_out_urb = NULL;
-	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
+	ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
 
 	usb_put_dev(ucs->udev);
 	ucs->interface = NULL;
@@ -916,9 +914,10 @@ static int __init usb_gigaset_init(void)
 	int result;
 
 	/* allocate memory for our driver state and initialize it */
-	if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				       GIGASET_MODULENAME, GIGASET_DEVNAME,
-				       &ops, THIS_MODULE)) == NULL)
+	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+				    GIGASET_MODULENAME, GIGASET_DEVNAME,
+				    &ops, THIS_MODULE);
+	if (driver == NULL)
 		goto error;
 
 	/* register this driver with the USB subsystem */
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index ff3a4e290da3..7726afdbb40b 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -110,6 +110,7 @@ set_debug(const char *val, struct kernel_param *kp)
 MODULE_AUTHOR("Karsten Keil");
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(SPEEDFAX_REV);
+MODULE_FIRMWARE("isdn/ISAR.BIN");
 module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Speedfax debug mask");
 module_param(irqloops, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index feb0fa45b664..fcfe17a19a61 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -779,7 +779,7 @@ base_sock_create(struct net *net, struct socket *sock, int protocol)
 }
 
 static int
-mISDN_sock_create(struct net *net, struct socket *sock, int proto)
+mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
 {
 	int err = -EPROTONOSUPPORT;
 
@@ -808,8 +808,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto)
 	return err;
 }
 
-static struct
-net_proto_family mISDN_sock_family_ops = {
+static const struct net_proto_family mISDN_sock_family_ops = {
 	.owner  = THIS_MODULE,
 	.family = PF_ISDN,
 	.create = mISDN_sock_create,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index df1f86b5c83e..a2ea383105a6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -249,5 +249,6 @@ config EP93XX_PWM
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
+source "drivers/misc/iwmc3200top/Kconfig"
 
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f982d2ecfde7..e311267a355f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_HP_ILO)		+= hpilo.o
 obj-$(CONFIG_ISL29003)		+= isl29003.o
 obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_C2PORT)		+= c2port/
+obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-y				+= eeprom/
 obj-y				+= cb710/
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 000000000000..9e4b88fb57f1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
+config IWMC3200TOP
+	tristate "Intel Wireless MultiCom Top Driver"
+	depends on MMC && EXPERIMENTAL
+	select FW_LOADER
+	---help---
+	  The Intel Wireless MultiCom 3200 Top driver is responsible
+	  for firmware load and enumeration of the enabled coms
+
+config IWMC3200TOP_DEBUG
+	bool "Enable full debug output of iwmc3200top Driver"
+	depends on IWMC3200TOP
+	---help---
+	  Enable full debug output of iwmc3200top Driver
+
+config IWMC3200TOP_DEBUGFS
+	bool "Enable Debugfs debugging interface for iwmc3200top"
+	depends on IWMC3200TOP
+	---help---
+	  Enable creation of debugfs files for iwmc3200top
+
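
For reference, a .config fragment that would build the driver described by the Kconfig entries above as a module with both debug options enabled (assuming the MMC and EXPERIMENTAL prerequisites are already set):

CONFIG_IWMC3200TOP=m
CONFIG_IWMC3200TOP_DEBUG=y
CONFIG_IWMC3200TOP_DEBUGFS=y
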
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 000000000000..fbf53fb4634e
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
+# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+# drivers/misc/iwmc3200top/Makefile
+#
+# Copyright (C) 2009 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License version
+# 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+#
+# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+#  -
+#
+#
+
+obj-$(CONFIG_IWMC3200TOP)	+= iwmc3200top.o
+iwmc3200top-objs	:= main.o fw-download.o
+iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
+iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 000000000000..0c8ea0a1c8a3
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,133 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/debugfs.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+#include <linux/debugfs.h>
+
+#include "iwmc3200top.h"
+#include "fw-msg.h"
+#include "log.h"
+#include "debugfs.h"
+
+
+
+/*      Constants definition        */
+#define HEXADECIMAL_RADIX	16
+
+/*      Functions definition        */
+
+
+#define DEBUGFS_ADD(name, parent) do {					\
+	dbgfs->dbgfs_##parent##_files.file_##name =			\
+	debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv,	\
+				&iwmct_dbgfs_##name##_ops);		\
+} while (0)
+
+#define DEBUGFS_RM(name)  do {		\
+	debugfs_remove(name);		\
+	name = NULL;			\
+} while (0)
+
+#define DEBUGFS_READ_FUNC(name)						\
+ssize_t iwmct_dbgfs_##name##_read(struct file *file,			\
+				  char __user *user_buf,		\
+				  size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)					\
+ssize_t iwmct_dbgfs_##name##_write(struct file *file,			\
+				   const char __user *user_buf,		\
+				   size_t count, loff_t *ppos);
+
+#define DEBUGFS_READ_FILE_OPS(name)					\
+	DEBUGFS_READ_FUNC(name)						\
+	static const struct file_operations iwmct_dbgfs_##name##_ops = {  \
+		.read = iwmct_dbgfs_##name##_read,			\
+		.open = iwmct_dbgfs_open_file_generic,			\
+	};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)					\
+	DEBUGFS_WRITE_FUNC(name)					\
+	static const struct file_operations iwmct_dbgfs_##name##_ops = {  \
+		.write = iwmct_dbgfs_##name##_write,			\
+		.open = iwmct_dbgfs_open_file_generic,			\
+	};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
+	DEBUGFS_READ_FUNC(name)						\
+	DEBUGFS_WRITE_FUNC(name)					\
+	static const struct file_operations iwmct_dbgfs_##name##_ops = {\
+		.write = iwmct_dbgfs_##name##_write,			\
+		.read = iwmct_dbgfs_##name##_read,			\
+		.open = iwmct_dbgfs_open_file_generic,			\
+	};
+
+
+/*      Debugfs file ops definitions        */
+
+/*
+ * Create the debugfs files and directories
+ *
+ */
+void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
+{
+	struct iwmct_debugfs *dbgfs;
+
+	dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
+	if (!dbgfs) {
+		LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
+					sizeof(struct iwmct_debugfs));
+		return;
+	}
+
+	priv->dbgfs = dbgfs;
+	dbgfs->name = name;
+	dbgfs->dir_drv = debugfs_create_dir(name, NULL);
+	if (!dbgfs->dir_drv) {
+		LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
+		return;
+	}
+
+	return;
+}
+
+/**
+ * Remove the debugfs files and directories
+ *
+ */
+void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
+{
+	if (!dbgfs)
+		return;
+
+	DEBUGFS_RM(dbgfs->dir_drv);
+	kfree(dbgfs);
+	dbgfs = NULL;
+}
+
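
The DEBUGFS_* helper macros above generate per-file boilerplate. For a hypothetical debugfs file named "status" attached to the drv directory, DEBUGFS_READ_FILE_OPS(status) and DEBUGFS_ADD(status, drv) would expand to roughly the following (a file_status member would also have to be added to struct dir_drv_files in debugfs.h, which is currently empty):

ssize_t iwmct_dbgfs_status_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos);

static const struct file_operations iwmct_dbgfs_status_ops = {
	.read = iwmct_dbgfs_status_read,
	.open = iwmct_dbgfs_open_file_generic,
};

/* DEBUGFS_ADD(status, drv): create the dentry under dir_drv and remember
 * it in the per-directory bookkeeping struct */
dbgfs->dbgfs_drv_files.file_status =
	debugfs_create_file("status", 0644, dbgfs->dir_drv, priv,
			    &iwmct_dbgfs_status_ops);
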
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 000000000000..71d45759b40f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/debugfs.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __DEBUGFS_H__
+#define __DEBUGFS_H__
+
+
+#ifdef CONFIG_IWMC3200TOP_DEBUGFS
+
+struct iwmct_debugfs {
+	const char *name;
+	struct dentry *dir_drv;
+	struct dir_drv_files {
+	} dbgfs_drv_files;
+};
+
+void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
+void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
+
+#else /* CONFIG_IWMC3200TOP_DEBUGFS */
+
+struct iwmct_debugfs;
+
+static inline void
+iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
+{}
+
+static inline void
+iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
+{}
+
+#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
+
+#endif /* __DEBUGFS_H__ */
+
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 000000000000..50d431e469f5
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,355 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/fw-download.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mmc/sdio_func.h>
+#include <asm/unaligned.h>
+
+#include "iwmc3200top.h"
+#include "log.h"
+#include "fw-msg.h"
+
+#define CHECKSUM_BYTES_NUM sizeof(u32)
+
+/*
+ * Initialize the parser struct with the firmware file
+ */
+static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
+			      size_t file_size, size_t block_size)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_fw_hdr *fw_hdr = &parser->versions;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+
+	LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
+
+	parser->file = file;
+	parser->file_size = file_size;
+	parser->cur_pos = 0;
+	parser->buf = NULL;
+
+	parser->buf = kzalloc(block_size, GFP_KERNEL);
+	if (!parser->buf) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
+		return -ENOMEM;
+	}
+	parser->buf_size = block_size;
+
+	/* extract fw versions */
+	memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
+	LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
+		"top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
+		fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
+		fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
+		fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
+		fw_hdr->tic_name);
+
+	parser->cur_pos += sizeof(struct iwmct_fw_hdr);
+
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return 0;
+}
+
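+/*
+ * Verify the firmware image checksum: the last 4 bytes of the file hold a
+ * little-endian 32-bit checksum, which is compared against the 32-bit sum
+ * of all preceding words (a trailing partial word, if any, is masked in).
+ */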
+static bool iwmct_checksum(struct iwmct_priv *priv)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	__le32 *file = (__le32 *)parser->file;
+	int i, pad, steps;
+	u32 accum = 0;
+	u32 checksum;
+	u32 mask = 0xffffffff;
+
+	pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
+	steps =  (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
+
+	LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
+
+	for (i = 0; i < steps; i++)
+		accum += le32_to_cpu(file[i]);
+
+	if (pad) {
+		mask <<= 8 * (4 - pad);
+		accum += le32_to_cpu(file[steps]) & mask;
+	}
+
+	checksum = get_unaligned_le32((__le32 *)(parser->file +
+			parser->file_size - CHECKSUM_BYTES_NUM));
+
+	LOG_INFO(priv, FW_DOWNLOAD,
+		"compare checksum accum=0x%x to checksum=0x%x\n",
+		accum, checksum);
+
+	return checksum == accum;
+}
+
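+/*
+ * Walk the firmware image section by section.  Each section starts with an
+ * iwmct_fw_sec_hdr (type string, data size, target address).  "ENT" records
+ * the entry point, "LBL" carries a label string, and "TOP"/"GPS"/"BTH"
+ * payloads are returned to the caller only if the reboot barker requested
+ * that image.  Returns 1 when a downloadable section was found, 0 at end
+ * of file.
+ */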
+static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
+				  size_t *sec_size, __le32 *sec_addr)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_dbg *dbg = &priv->dbg;
+	struct iwmct_fw_sec_hdr *sec_hdr;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+
+	while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
+		<= parser->file_size) {
+
+		sec_hdr = (struct iwmct_fw_sec_hdr *)
+				(parser->file + parser->cur_pos);
+		parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
+
+		LOG_INFO(priv, FW_DOWNLOAD,
+			"sec hdr: type=%s addr=0x%x size=%d\n",
+			sec_hdr->type, sec_hdr->target_addr,
+			sec_hdr->data_size);
+
+		if (strcmp(sec_hdr->type, "ENT") == 0)
+			parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
+		else if (strcmp(sec_hdr->type, "LBL") == 0)
+			strcpy(dbg->label_fw, parser->file + parser->cur_pos);
+		else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
+			  (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
+			 ((strcmp(sec_hdr->type, "GPS") == 0) &&
+			  (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
+			 ((strcmp(sec_hdr->type, "BTH") == 0) &&
+			  (priv->barker & BARKER_DNLOAD_BT_MSK))) {
+			*sec_addr = sec_hdr->target_addr;
+			*sec_size = le32_to_cpu(sec_hdr->data_size);
+			*p_sec = parser->file + parser->cur_pos;
+			parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
+			return 1;
+		} else if (strcmp(sec_hdr->type, "LOG") != 0)
+			LOG_WARNING(priv, FW_DOWNLOAD,
+				    "skipping section type %s\n",
+				    sec_hdr->type);
+
+		parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
+		LOG_INFO(priv, FW_DOWNLOAD,
+			"finished with section cur_pos=%zd\n", parser->cur_pos);
+	}
+
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return 0;
+}
+
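+/*
+ * Send one firmware section to the device.  The section is split into
+ * transfers that fit parser->buf: each transfer is an iwmct_fw_load_hdr
+ * (command word, target address, size, optional block checksum) followed
+ * by the data, padded up to a multiple of IWMC_SDIO_BLK_SIZE.
+ */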
+static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
+				size_t sec_size, __le32 addr)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
+	const u8 *cur_block = p_sec;
+	size_t sent = 0;
+	int cnt = 0;
+	int ret = 0;
+	u32 cmd = 0;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+	LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
+				addr, sec_size);
+
+	while (sent < sec_size) {
+		int i;
+		u32 chksm = 0;
+		u32 reset = atomic_read(&priv->reset);
+		/* actual FW data */
+		u32 data_size = min(parser->buf_size - sizeof(*hdr),
+				    sec_size - sent);
+		/* Pad to block size */
+		u32 trans_size = (data_size + sizeof(*hdr) +
+				  IWMC_SDIO_BLK_SIZE - 1) &
+				  ~(IWMC_SDIO_BLK_SIZE - 1);
+		++cnt;
+
+		/* in case of reset, interrupt FW download */
+		if (reset) {
+			LOG_INFO(priv, FW_DOWNLOAD,
+				 "Reset detected. Abort FW download!!!");
+			ret = -ECANCELED;
+			goto exit;
+		}
+
+		memset(parser->buf, 0, parser->buf_size);
+		cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
+		cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
+		cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
+		cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
+		hdr->data_size = cpu_to_le32(data_size);
+		hdr->target_addr = addr;
+
+		/* checksum is allowed for sizes divisible by 4 */
+		if (data_size & 0x3)
+			cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
+
+		memcpy(hdr->data, cur_block, data_size);
+
+
+		if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
+
+			chksm = data_size + le32_to_cpu(addr) + cmd;
+			for (i = 0; i < data_size >> 2; i++)
+				chksm += ((u32 *)cur_block)[i];
+
+			hdr->block_chksm = cpu_to_le32(chksm);
+			LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
+				 hdr->block_chksm);
+		}
+
+		LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
+				"sec_size=%zd, startAddress 0x%X\n",
+				cnt, trans_size, sent, sec_size, addr);
+
+		if (priv->dbg.dump)
+			LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
+
+
+		hdr->cmd = cpu_to_le32(cmd);
+		/* send it down */
+		/* TODO: add proper error checking to the send path */
+		ret = iwmct_tx(priv, 0, parser->buf, trans_size);
+		if (ret != 0) {
+			LOG_INFO(priv, FW_DOWNLOAD,
+				"iwmct_tx returned %d\n", ret);
+			goto exit;
+		}
+
+		addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
+		sent += data_size;
+		cur_block = p_sec + sent;
+
+		if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
+			LOG_INFO(priv, FW_DOWNLOAD,
+				"Block number limit is reached [%d]\n",
+				priv->dbg.blocks);
+			break;
+		}
+	}
+
+	if (sent < sec_size)
+		ret = -EINVAL;
+exit:
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return ret;
+}
+
+static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
+	int ret;
+	u32 cmd;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+
+	memset(parser->buf, 0, parser->buf_size);
+	cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
+	if (jump) {
+		cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
+		hdr->target_addr = cpu_to_le32(parser->entry_point);
+		LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
+				parser->entry_point);
+	} else {
+		cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
+		LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
+	}
+
+	hdr->cmd = cpu_to_le32(cmd);
+
+	LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
+	/* send it down */
+	/* TODO: add proper error checking to the send path */
+	ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
+	if (ret)
+		LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
+
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return ret;
+}
+
+int iwmct_fw_load(struct iwmct_priv *priv)
+{
+	const u8 *fw_name = FW_NAME(FW_API_VER);
+	const struct firmware *raw;
+	const u8 *pdata;
+	size_t len;
+	__le32 addr;
+	int ret;
+
+	/* clear parser struct */
+	memset(&priv->parser, 0, sizeof(struct iwmct_parser));
+
+	/* get the firmware */
+	ret = request_firmware(&raw, fw_name, &priv->func->dev);
+	if (ret < 0) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
+			  fw_name, ret);
+		goto exit;
+	}
+
+	if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
+		LOG_ERROR(priv, FW_DOWNLOAD,
+			  "%s is too small: need at least %zd bytes, got %zd\n",
+			  fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
+
+	ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
+	if (ret < 0) {
+		LOG_ERROR(priv, FW_DOWNLOAD,
+			  "iwmct_fw_parser_init failed, reason %d\n", ret);
+		goto exit;
+	}
+
+	/* checksum  */
+	if (!iwmct_checksum(priv)) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* download firmware to device */
+	while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
+		if (iwmct_download_section(priv, pdata, len, addr)) {
+			LOG_ERROR(priv, FW_DOWNLOAD,
+				  "%s download section failed\n", fw_name);
+			ret = -EIO;
+			goto exit;
+		}
+	}
+
+	iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
+
+exit:
+	kfree(priv->parser.buf);
+
+	if (raw)
+		release_firmware(raw);
+
+	return ret;
+}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 000000000000..9e26b75bd482
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/fw-msg.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __FWMSG_H__
+#define __FWMSG_H__
+
+#define COMM_TYPE_D2H           	0xFF
+#define COMM_TYPE_H2D           	0xEE
+
+#define COMM_CATEGORY_OPERATIONAL      	0x00
+#define COMM_CATEGORY_DEBUG            	0x01
+#define COMM_CATEGORY_TESTABILITY      	0x02
+#define COMM_CATEGORY_DIAGNOSTICS      	0x03
+
+#define OP_DBG_ZSTR_MSG			cpu_to_le16(0x1A)
+
+#define FW_LOG_SRC_MAX			32
+#define FW_LOG_SRC_ALL			255
+
+#define FW_STRING_TABLE_ADDR		cpu_to_le32(0x0C000000)
+
+#define CMD_DBG_LOG_LEVEL		cpu_to_le16(0x0001)
+#define CMD_TST_DEV_RESET		cpu_to_le16(0x0060)
+#define CMD_TST_FUNC_RESET		cpu_to_le16(0x0062)
+#define CMD_TST_IFACE_RESET		cpu_to_le16(0x0064)
+#define CMD_TST_CPU_UTILIZATION		cpu_to_le16(0x0065)
+#define CMD_TST_TOP_DEEP_SLEEP		cpu_to_le16(0x0080)
+#define CMD_TST_WAKEUP			cpu_to_le16(0x0081)
+#define CMD_TST_FUNC_WAKEUP		cpu_to_le16(0x0082)
+#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST	cpu_to_le16(0x0083)
+#define CMD_TST_GET_MEM_DUMP		cpu_to_le16(0x0096)
+
+#define OP_OPR_ALIVE			cpu_to_le16(0x0010)
+#define OP_OPR_CMD_ACK			cpu_to_le16(0x001F)
+#define OP_OPR_CMD_NACK			cpu_to_le16(0x0020)
+#define OP_TST_MEM_DUMP			cpu_to_le16(0x0043)
+
+#define CMD_FLAG_PADDING_256		0x80
+
+#define FW_HCMD_BLOCK_SIZE      	256
+
+struct msg_hdr {
+	u8 type;
+	u8 category;
+	__le16 opcode;
+	u8 seqnum;
+	u8 flags;
+	__le16 length;
+} __attribute__((__packed__));
+
+struct log_hdr {
+	__le32 timestamp;
+	u8 severity;
+	u8 logsource;
+	__le16 reserved;
+} __attribute__((__packed__));
+
+struct mdump_hdr {
+	u8 dmpid;
+	u8 frag;
+	__le16 size;
+	__le32 addr;
+} __attribute__((__packed__));
+
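+/*
+ * Every TOP message starts with a msg_hdr; the payload depends on the
+ * direction and category: D2H log/memdump messages carry a log_hdr (and
+ * an mdump_hdr for dumps), while the H2D log-level command carries up to
+ * FW_LOG_SRC_MAX {logsource, sevmask} pairs.
+ */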
+struct top_msg {
+	struct msg_hdr hdr;
+	union {
+		/* D2H messages */
+		struct {
+			struct log_hdr log_hdr;
+			u8 data[1];
+		} __attribute__((__packed__)) log;
+
+		struct {
+			struct log_hdr log_hdr;
+			struct mdump_hdr md_hdr;
+			u8 data[1];
+		} __attribute__((__packed__)) mdump;
+
+		/* H2D messages */
+		struct {
+			u8 logsource;
+			u8 sevmask;
+		} __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
+		struct mdump_hdr mdump_req;
+	} u;
+} __attribute__((__packed__));
+
+
+#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 000000000000..43bd510e1872
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,209 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/iwmc3200top.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __IWMC3200TOP_H__
+#define __IWMC3200TOP_H__
+
+#include <linux/workqueue.h>
+
+#define DRV_NAME "iwmc3200top"
+#define FW_API_VER 1
+#define _FW_NAME(api) DRV_NAME "." #api ".fw"
+#define FW_NAME(api) _FW_NAME(api)
+
+#define IWMC_SDIO_BLK_SIZE			256
+#define IWMC_DEFAULT_TR_BLK			64
+#define IWMC_SDIO_DATA_ADDR			0x0
+#define IWMC_SDIO_INTR_ENABLE_ADDR		0x14
+#define IWMC_SDIO_INTR_STATUS_ADDR		0x13
+#define IWMC_SDIO_INTR_CLEAR_ADDR		0x13
+#define IWMC_SDIO_INTR_GET_SIZE_ADDR		0x2C
+
+#define COMM_HUB_HEADER_LENGTH 16
+#define LOGGER_HEADER_LENGTH   10
+
+
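+/*
+ * Reboot barker layout (one 32-bit word, replicated over the first four
+ * dwords of the message): bits 0-2 select which images to download
+ * (BT/GPS/TOP), bit 4 requests a jump after download, bit 5 requests a
+ * sync handshake, and bits 8-31 carry the barker magic (0xdeadbe).
+ */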
+#define BARKER_DNLOAD_BT_POS		0
+#define BARKER_DNLOAD_BT_MSK		BIT(BARKER_DNLOAD_BT_POS)
+#define BARKER_DNLOAD_GPS_POS		1
+#define BARKER_DNLOAD_GPS_MSK		BIT(BARKER_DNLOAD_GPS_POS)
+#define BARKER_DNLOAD_TOP_POS		2
+#define BARKER_DNLOAD_TOP_MSK		BIT(BARKER_DNLOAD_TOP_POS)
+#define BARKER_DNLOAD_RESERVED1_POS	3
+#define BARKER_DNLOAD_RESERVED1_MSK	BIT(BARKER_DNLOAD_RESERVED1_POS)
+#define BARKER_DNLOAD_JUMP_POS		4
+#define BARKER_DNLOAD_JUMP_MSK		BIT(BARKER_DNLOAD_JUMP_POS)
+#define BARKER_DNLOAD_SYNC_POS		5
+#define BARKER_DNLOAD_SYNC_MSK		BIT(BARKER_DNLOAD_SYNC_POS)
+#define BARKER_DNLOAD_RESERVED2_POS	6
+#define BARKER_DNLOAD_RESERVED2_MSK	(0x3 << BARKER_DNLOAD_RESERVED2_POS)
+#define BARKER_DNLOAD_BARKER_POS	8
+#define BARKER_DNLOAD_BARKER_MSK	(0xffffff << BARKER_DNLOAD_BARKER_POS)
+
+#define IWMC_BARKER_REBOOT 	(0xdeadbe << BARKER_DNLOAD_BARKER_POS)
+/* whole field barker */
+#define IWMC_BARKER_ACK 	0xfeedbabe
+
+#define IWMC_CMD_SIGNATURE 	0xcbbc
+
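+/*
+ * Download command word layout (iwmct_fw_load_hdr.cmd): bits 0-3 opcode,
+ * bits 4-7 response code, bit 8 use-checksum, bit 9 response-required,
+ * bit 10 direct-access, bits 11-15 reserved, bits 16-31 signature
+ * (IWMC_CMD_SIGNATURE).
+ */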
+#define CMD_HDR_OPCODE_POS		0
+#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
+#define CMD_HDR_RESPONSE_CODE_POS	4
+#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
+#define CMD_HDR_USE_CHECKSUM_POS	8
+#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
+#define CMD_HDR_RESPONSE_REQUIRED_POS	9
+#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
+#define CMD_HDR_DIRECT_ACCESS_POS	10
+#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
+#define CMD_HDR_RESERVED_POS		11
+#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
+#define CMD_HDR_SIGNATURE_POS		16
+#define CMD_HDR_SIGNATURE_MSK		(0xffff << CMD_HDR_SIGNATURE_POS)
+
+enum {
+	IWMC_OPCODE_PING = 0,
+	IWMC_OPCODE_READ = 1,
+	IWMC_OPCODE_WRITE = 2,
+	IWMC_OPCODE_JUMP = 3,
+	IWMC_OPCODE_REBOOT = 4,
+	IWMC_OPCODE_PERSISTENT_WRITE = 5,
+	IWMC_OPCODE_PERSISTENT_READ = 6,
+	IWMC_OPCODE_READ_MODIFY_WRITE = 7,
+	IWMC_OPCODE_LAST_COMMAND = 15
+};
+
+struct iwmct_fw_load_hdr {
+	__le32 cmd;
+	__le32 target_addr;
+	__le32 data_size;
+	__le32 block_chksm;
+	u8 data[0];
+};
+
+/**
+ * struct iwmct_fw_hdr
+ * holds the versions of all SW components
+ */
+struct iwmct_fw_hdr {
+	u8 top_major;
+	u8 top_minor;
+	u8 top_revision;
+	u8 gps_major;
+	u8 gps_minor;
+	u8 gps_revision;
+	u8 bt_major;
+	u8 bt_minor;
+	u8 bt_revision;
+	u8 tic_name[31];
+};
+
+/**
+ * struct iwmct_fw_sec_hdr
+ * @type: function type
+ * @data_size: section's data size
+ * @target_addr: download address
+ */
+struct iwmct_fw_sec_hdr {
+	u8 type[4];
+	__le32 data_size;
+	__le32 target_addr;
+};
+
+/**
+ * struct iwmct_parser
+ * @file: fw image
+ * @file_size: fw size
+ * @cur_pos: position in file
+ * @buf: temp buf for download
+ * @buf_size: size of buf
+ * @entry_point: address to jump in fw kick-off
+ */
+struct iwmct_parser {
+	const u8 *file;
+	size_t file_size;
+	size_t cur_pos;
+	u8 *buf;
+	size_t buf_size;
+	u32 entry_point;
+	struct iwmct_fw_hdr versions;
+};
+
+
+struct iwmct_work_struct {
+	struct list_head list;
+	ssize_t iosize;
+};
+
+struct iwmct_dbg {
+	int blocks;
+	bool dump;
+	bool jump;
+	bool direct;
+	bool checksum;
+	bool fw_download;
+	int block_size;
+	int download_trans_blks;
+
+	char label_fw[256];
+};
+
+struct iwmct_debugfs;
+
+struct iwmct_priv {
+	struct sdio_func *func;
+	struct iwmct_debugfs *dbgfs;
+	struct iwmct_parser parser;
+	atomic_t reset;
+	atomic_t dev_sync;
+	u32 trans_len;
+	u32 barker;
+	struct iwmct_dbg dbg;
+
+	/* drivers work queue */
+	struct workqueue_struct *wq;
+	struct workqueue_struct *bus_rescan_wq;
+	struct work_struct bus_rescan_worker;
+	struct work_struct isr_worker;
+
+	/* drivers wait queue */
+	wait_queue_head_t wait_q;
+
+	/* rx request list */
+	struct list_head read_req_list;
+};
+
+extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
+		void *src, int count);
+
+extern int iwmct_fw_load(struct iwmct_priv *priv);
+
+extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
+extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
+extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
+extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
+
+#endif  /*  __IWMC3200TOP_H__  */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 000000000000..d569279698f6
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,347 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/log.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/ctype.h>
+#include "fw-msg.h"
+#include "iwmc3200top.h"
+#include "log.h"
+
+/* Maximal hexadecimal string size of the FW memdump message */
+#define LOG_MSG_SIZE_MAX		12400
+
+/* iwmct_logdefs is a global used by log macros */
+u8 iwmct_logdefs[LOG_SRC_MAX];
+static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
+
+
+static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
+{
+	int i;
+
+	if (src < size)
+		logdefs[src] = logmask;
+	else if (src == LOG_SRC_ALL)
+		for (i = 0; i < size; i++)
+			logdefs[i] = logmask;
+	else
+		return -1;
+
+	return 0;
+}
+
+
+int iwmct_log_set_filter(u8 src, u8 logmask)
+{
+	return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
+}
+
+
+int iwmct_log_set_fw_filter(u8 src, u8 logmask)
+{
+	return _log_set_log_filter(iwmct_fw_logdefs,
+				   FW_LOG_SRC_MAX, src, logmask);
+}
+
+
+static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
+			      int ilen, char *pref)
+{
+	int pos = 0;
+	int i;
+	int len;
+
+	for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
+		str[pos] = pref[i];
+
+	for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
+		len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
+
+	if (i < ilen)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * NOTE: This function is not thread safe.
+ * Currently it is called only from the sdio rx worker - no race there.
+ */
+void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
+{
+	struct top_msg *msg;
+	static char logbuf[LOG_MSG_SIZE_MAX];
+
+	msg = (struct top_msg *)buf;
+
+	if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
+		LOG_ERROR(priv, FW_MSG, "Log message from TOP "
+			  "is too short %d (expected %zd)\n",
+			  len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
+		return;
+	}
+
+	if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
+		BIT(msg->u.log.log_hdr.severity)) ||
+	    !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
+		return;
+
+	switch (msg->hdr.category) {
+	case COMM_CATEGORY_TESTABILITY:
+		if (!(iwmct_logdefs[LOG_SRC_TST] &
+		      BIT(msg->u.log.log_hdr.severity)))
+			return;
+		if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
+				       le16_to_cpu(msg->hdr.length) +
+				       sizeof(msg->hdr), "<TST>"))
+			LOG_WARNING(priv, TST,
+				  "TOP TST message is too long, truncating...");
+		LOG_WARNING(priv, TST, "%s\n", logbuf);
+		break;
+	case COMM_CATEGORY_DEBUG:
+		if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
+			LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
+				       ((u8 *)msg) + sizeof(msg->hdr)
+					+ sizeof(msg->u.log.log_hdr));
+		else {
+			if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
+					le16_to_cpu(msg->hdr.length)
+						+ sizeof(msg->hdr),
+					"<DBG>"))
+				LOG_WARNING(priv, FW_MSG,
+					"TOP DBG message is too long, "
+					"truncating...");
+			LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
+{
+	int i, pos, len;
+	for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
+		len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
+				i, logdefs[i]);
+		pos += len;
+	}
+	buf[pos-1] = '\n';
+	buf[pos] = '\0';
+
+	if (i < logdefsz)
+		return -1;
+	return 0;
+}
+
+int log_get_filter_str(char *buf, int size)
+{
+	return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
+}
+
+int log_get_fw_filter_str(char *buf, int size)
+{
+	return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
+}
+
+#define HEXADECIMAL_RADIX	16
+#define LOG_SRC_FORMAT		7 /* log level is in format of "0xXXXX," */
+
+ssize_t show_iwmct_log_level(struct device *d,
+				struct device_attribute *attr, char *buf)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	char *str_buf;
+	int buf_size;
+	ssize_t ret;
+
+	buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
+	str_buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %d bytes\n", buf_size);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (log_get_filter_str(str_buf, buf_size) < 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = sprintf(buf, "%s", str_buf);
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
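+/*
+ * Parse a comma-separated list of hex values of the form 0xSSMM, where SS
+ * is the log source index (or LOG_SRC_ALL) and MM is the severity bitmask,
+ * and apply each entry to the driver log filter.
+ */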
+ssize_t store_iwmct_log_level(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	char *token, *cur, *str_buf = NULL;
+	long val;
+	ssize_t ret = count;
+	u8 src, mask;
+
+	if (!count)
+		goto exit;
+
+	str_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %zd bytes\n", count);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(str_buf, buf, count);
+	cur = str_buf;
+
+	/* strsep() advances the cursor; keep str_buf intact for kfree() */
+	while ((token = strsep(&cur, ",")) != NULL) {
+		while (isspace(*token))
+			++token;
+		if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
+			LOG_ERROR(priv, DEBUGFS,
+				  "failed to convert string to long %s\n",
+				  token);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		mask  = val & 0xFF;
+		src = (val & 0XFF00) >> 8;
+		iwmct_log_set_filter(src, mask);
+	}
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
+ssize_t show_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	char *str_buf;
+	int buf_size;
+	ssize_t ret;
+
+	buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
+
+	str_buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %d bytes\n", buf_size);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = sprintf(buf, "%s", str_buf);
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
+ssize_t store_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	struct top_msg cmd;
+	char *token, *cur, *str_buf = NULL;
+	ssize_t ret = count;
+	u16 cmdlen = 0;
+	int i;
+	long val;
+	u8 src, mask;
+
+	if (!count)
+		goto exit;
+
+	str_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %zd bytes\n", count);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(str_buf, buf, count);
+	cur = str_buf;
+
+	cmd.hdr.type = COMM_TYPE_H2D;
+	cmd.hdr.category = COMM_CATEGORY_DEBUG;
+	cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
+
+	/* strsep() advances the cursor; keep str_buf intact for kfree() */
+	for (i = 0; ((token = strsep(&cur, ",")) != NULL) &&
+		     (i < FW_LOG_SRC_MAX); i++) {
+
+		while (isspace(*token))
+			++token;
+
+		if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
+			LOG_ERROR(priv, DEBUGFS,
+				  "failed to convert string to long %s\n",
+				  token);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		mask  = val & 0xFF; /* LSB */
+		src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
+		iwmct_log_set_fw_filter(src, mask);
+
+		cmd.u.logdefs[i].logsource = src;
+		cmd.u.logdefs[i].sevmask = mask;
+	}
+
+	cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
+	cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
+
+	ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
+	if (ret) {
+		LOG_ERROR(priv, DEBUGFS,
+			  "Failed to send %d bytes of fwcmd, ret=%zd\n",
+			  cmdlen, ret);
+		goto exit;
+	} else
+		LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
+
+	ret = count;
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 000000000000..aba8121f978c
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,158 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/log.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __LOG_H__
+#define __LOG_H__
+
+
+/* log severity:
+ * The log levels here match FW log levels
+ * so values need to stay as is */
+#define LOG_SEV_CRITICAL		0
+#define LOG_SEV_ERROR			1
+#define LOG_SEV_WARNING			2
+#define LOG_SEV_INFO			3
+#define LOG_SEV_INFOEX			4
+
+#define LOG_SEV_FILTER_ALL		\
+	(BIT(LOG_SEV_CRITICAL) |	\
+	 BIT(LOG_SEV_ERROR)    |	\
+	 BIT(LOG_SEV_WARNING)  | 	\
+	 BIT(LOG_SEV_INFO)     |	\
+	 BIT(LOG_SEV_INFOEX))
+
+/* log source */
+#define LOG_SRC_INIT			0
+#define LOG_SRC_DEBUGFS			1
+#define LOG_SRC_FW_DOWNLOAD		2
+#define LOG_SRC_FW_MSG			3
+#define LOG_SRC_TST			4
+#define LOG_SRC_IRQ			5
+
+#define	LOG_SRC_MAX			6
+#define	LOG_SRC_ALL			0xFF
+
+/*
+ * Default runtime log level used at initialization
+ */
+#ifndef LOG_SEV_FILTER_RUNTIME
+#define LOG_SEV_FILTER_RUNTIME			\
+	(BIT(LOG_SEV_CRITICAL)	|		\
+	 BIT(LOG_SEV_ERROR)	|		\
+	 BIT(LOG_SEV_WARNING))
+#endif
+
+#ifndef FW_LOG_SEV_FILTER_RUNTIME
+#define FW_LOG_SEV_FILTER_RUNTIME	LOG_SEV_FILTER_ALL
+#endif
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+/**
+ * Log macros
+ */
+
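+/*
+ * Each LOG_* macro checks iwmct_logdefs[LOG_SRC_<src>], a per-source
+ * bitmask of enabled severities, before forwarding the message to the
+ * corresponding dev_*() helper with the calling function and line number.
+ */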
+#define priv2dev(priv) (&(priv->func)->dev)
+
+#define LOG_CRITICAL(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL))	\
+		dev_crit(priv2dev(priv), "%s %d: " fmt,			\
+			__func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_ERROR(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR))	\
+		dev_err(priv2dev(priv), "%s %d: " fmt,			\
+			__func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_WARNING(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING))	\
+		dev_warn(priv2dev(priv), "%s %d: " fmt,			\
+			 __func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_INFO(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO))		\
+		dev_info(priv2dev(priv), "%s %d: " fmt,			\
+			 __func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_INFOEX(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX))	\
+		dev_dbg(priv2dev(priv), "%s %d: " fmt,			\
+			 __func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_HEXDUMP(src, ptr, len)					\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX))	\
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,	\
+				16, 1, ptr, len, false);		\
+} while (0)
+
+void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
+
+extern u8 iwmct_logdefs[];
+
+int iwmct_log_set_filter(u8 src, u8 logmask);
+int iwmct_log_set_fw_filter(u8 src, u8 logmask);
+
+ssize_t show_iwmct_log_level(struct device *d,
+			struct device_attribute *attr, char *buf);
+ssize_t store_iwmct_log_level(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count);
+ssize_t show_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr, char *buf);
+ssize_t store_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count);
+
+#else
+
+#define LOG_CRITICAL(priv, src, fmt, args...)
+#define LOG_ERROR(priv, src, fmt, args...)
+#define LOG_WARNING(priv, src, fmt, args...)
+#define LOG_INFO(priv, src, fmt, args...)
+#define LOG_INFOEX(priv, src, fmt, args...)
+#define LOG_HEXDUMP(src, ptr, len)
+
+static inline void iwmct_log_top_message(struct iwmct_priv *priv,
+					 u8 *buf, int len) {}
+static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
+static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
+
+#endif /* CONFIG_IWMC3200TOP_DEBUG */
+
+int log_get_filter_str(char *buf, int size);
+int log_get_fw_filter_str(char *buf, int size);
+
+#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 000000000000..fafcaa481d74
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,678 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/main.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+
+#include "iwmc3200top.h"
+#include "log.h"
+#include "fw-msg.h"
+#include "debugfs.h"
+
+
+#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
+#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
+
+#define DRIVER_VERSION  "0.1.62"
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRIVER_COPYRIGHT);
+MODULE_FIRMWARE(FW_NAME(FW_API_VER));
+
+/*
+ * This worker's main task is to wait for OP_OPR_ALIVE
+ * from the TOP FW until the ALIVE_MSG_TIMOUT timeout elapses.
+ * When OP_OPR_ALIVE is received it issues
+ * a call to "bus_rescan_devices".
+ */
+static void iwmct_rescan_worker(struct work_struct *ws)
+{
+	struct iwmct_priv *priv;
+	int ret;
+
+	priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
+
+	LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
+
+	ret = bus_rescan_devices(priv->func->dev.bus);
+	if (ret < 0)
+		LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
+}
+
+static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
+{
+	switch (msg->hdr.opcode) {
+	case OP_OPR_ALIVE:
+		LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
+		queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
+		break;
+	default:
+		LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
+			msg->hdr.opcode);
+		break;
+	}
+}
+
+
+static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
+{
+	struct top_msg *msg;
+
+	msg = (struct top_msg *)buf;
+
+	if (msg->hdr.type != COMM_TYPE_D2H) {
+		LOG_ERROR(priv, FW_MSG,
+			"Message from TOP with invalid message type 0x%X\n",
+			msg->hdr.type);
+		return;
+	}
+
+	if (len < sizeof(msg->hdr)) {
+		LOG_ERROR(priv, FW_MSG,
+			"Message from TOP is too short for message header "
+			"received %d bytes, expected at least %zd bytes\n",
+			len, sizeof(msg->hdr));
+		return;
+	}
+
+	if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
+		LOG_ERROR(priv, FW_MSG,
+			"Message is shorter (%d bytes) than indicated "
+			"in its header (%d bytes)\n",
+			len, le16_to_cpu(msg->hdr.length));
+		return;
+	}
+
+	switch (msg->hdr.category) {
+	case COMM_CATEGORY_OPERATIONAL:
+		op_top_message(priv, (struct top_msg *)buf);
+		break;
+
+	case COMM_CATEGORY_DEBUG:
+	case COMM_CATEGORY_TESTABILITY:
+	case COMM_CATEGORY_DIAGNOSTICS:
+		iwmct_log_top_message(priv, buf, len);
+		break;
+
+	default:
+		LOG_ERROR(priv, FW_MSG,
+			"Message from TOP with unknown category 0x%X\n",
+			msg->hdr.category);
+		break;
+	}
+}
+
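+/*
+ * Host commands are always sent as a single FW_HCMD_BLOCK_SIZE (256 byte)
+ * block to IWMC_SDIO_DATA_ADDR; shorter commands are zero-padded and the
+ * CMD_FLAG_PADDING_256 flag is set in the header.
+ */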
+int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
+{
+	int ret;
+	u8 *buf;
+
+	LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
+
+	/* add padding to 256 for IWMC */
+	((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
+
+	LOG_HEXDUMP(FW_MSG, cmd, len);
+
+	if (len > FW_HCMD_BLOCK_SIZE) {
+		LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
+			  len, FW_HCMD_BLOCK_SIZE);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
+	if (!buf) {
+		LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
+			  FW_HCMD_BLOCK_SIZE);
+		return -ENOMEM;
+	}
+
+	memcpy(buf, cmd, len);
+
+	sdio_claim_host(priv->func);
+	ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
+			       FW_HCMD_BLOCK_SIZE);
+	sdio_release_host(priv->func);
+
+	kfree(buf);
+	return ret;
+}
+
+int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
+	void *src, int count)
+{
+	int ret;
+
+	sdio_claim_host(priv->func);
+	ret = sdio_memcpy_toio(priv->func, addr, src, count);
+	sdio_release_host(priv->func);
+
+	return ret;
+}
+
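+/*
+ * Read worker: pops the oldest read request queued by the ISR, reads that
+ * many bytes from the device and inspects the first dword.  An ACK barker
+ * (IWMC_BARKER_ACK) or a reboot barker (IWMC_BARKER_REBOOT pattern) drives
+ * the sync/FW-download handshake; anything else is handed to the TOP
+ * CommHub message handler.
+ */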
+static void iwmct_irq_read_worker(struct work_struct *ws)
+{
+	struct iwmct_priv *priv;
+	struct iwmct_work_struct *read_req;
+	__le32 *buf = NULL;
+	int ret;
+	int iosize;
+	u32 barker;
+	bool is_barker;
+
+	priv = container_of(ws, struct iwmct_priv, isr_worker);
+
+	LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
+
+	/* --------------------- Handshake with device -------------------- */
+	sdio_claim_host(priv->func);
+
+	/* all list manipulations have to be protected by
+	 * sdio_claim_host/sdio_release_host */
+	if (list_empty(&priv->read_req_list)) {
+		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
+		goto exit_release;
+	}
+
+	read_req = list_entry(priv->read_req_list.next,
+			      struct iwmct_work_struct, list);
+
+	list_del(&read_req->list);
+	iosize = read_req->iosize;
+	kfree(read_req);
+
+	buf = kzalloc(iosize, GFP_KERNEL);
+	if (!buf) {
+		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
+		goto exit_release;
+	}
+
+	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
+				iosize, buf, priv->func->num);
+
+	/* read from device */
+	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
+	if (ret) {
+		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
+		goto exit_release;
+	}
+
+	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
+
+	barker = le32_to_cpu(buf[0]);
+
+	/* Verify whether it's a barker and if not - treat as regular Rx */
+	if (barker == IWMC_BARKER_ACK ||
+	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
+
+		/* A valid barker is replicated across the first 4 dwords */
+		is_barker = (buf[1] == buf[0]) &&
+			    (buf[2] == buf[0]) &&
+			    (buf[3] == buf[0]);
+
+		if (!is_barker) {
+			LOG_WARNING(priv, IRQ,
+				"Potentially inconsistent barker "
+				"%08X_%08X_%08X_%08X\n",
+				le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
+				le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
+		}
+	} else {
+		is_barker = false;
+	}
+
+	/* Handle Top CommHub message */
+	if (!is_barker) {
+		sdio_release_host(priv->func);
+		handle_top_message(priv, (u8 *)buf, iosize);
+		goto exit;
+	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
+		if (atomic_read(&priv->dev_sync) == 0) {
+			LOG_ERROR(priv, IRQ,
+				  "ACK barker arrived out-of-sync\n");
+			goto exit_release;
+		}
+
+		/* Continuing to FW download (after Sync is completed) */
+		atomic_set(&priv->dev_sync, 0);
+		LOG_INFO(priv, IRQ, "ACK barker arrived "
+				"- starting FW download\n");
+	} else { /* REBOOT barker */
+		LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
+		priv->barker = barker;
+
+		if (barker & BARKER_DNLOAD_SYNC_MSK) {
+			/* Send the same barker back */
+			ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
+					       buf, iosize);
+			if (ret) {
+				LOG_ERROR(priv, IRQ,
+					 "error %d echoing barker\n", ret);
+				goto exit_release;
+			}
+			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
+			atomic_set(&priv->dev_sync, 1);
+			goto exit_release;
+		}
+
+		/* Continuing to FW download (without Sync) */
+		LOG_INFO(priv, IRQ, "No sync requested "
+				    "- starting FW download\n");
+	}
+
+	sdio_release_host(priv->func);
+
+
+	LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
+	LOG_INFO(priv, IRQ, "*******  Top FW %s requested ********\n",
+			(priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
+	LOG_INFO(priv, IRQ, "*******  GPS FW %s requested ********\n",
+			(priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
+	LOG_INFO(priv, IRQ, "*******  BT FW %s requested ********\n",
+			(priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
+
+	if (priv->dbg.fw_download)
+		iwmct_fw_load(priv);
+	else
+		LOG_ERROR(priv, IRQ, "FW download not allowed\n");
+
+	goto exit;
+
+exit_release:
+	sdio_release_host(priv->func);
+exit:
+	kfree(buf);
+	LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
+}
+
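+/*
+ * SDIO interrupt handler: reads the interrupt status, then the 16-bit
+ * transfer size (which must be read before the interrupt is cleared),
+ * queues a read request with that size and defers the actual I/O to
+ * iwmct_irq_read_worker.
+ */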
+static void iwmct_irq(struct sdio_func *func)
+{
+	struct iwmct_priv *priv;
+	int val, ret;
+	int iosize;
+	int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
+	struct iwmct_work_struct *read_req;
+
+	priv = sdio_get_drvdata(func);
+
+	LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
+
+	/* read the function's status register */
+	val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
+
+	LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
+
+	if (!val) {
+		LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
+		goto exit_clear_intr;
+	}
+
+
+	/*
+	 * read 2 bytes of the transaction size
+	 * IMPORTANT: sdio transaction size has to be read before clearing
+	 * sdio interrupt!!!
+	 */
+	val = sdio_readb(priv->func, addr++, &ret);
+	iosize = val;
+	val = sdio_readb(priv->func, addr++, &ret);
+	iosize += val << 8;
+
+	LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
+
+	if (iosize == 0) {
+		LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
+		goto exit_clear_intr;
+	}
+
+	/* allocate a work structure to pass iosize to the worker */
+	read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
+	if (!read_req) {
+		LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
+		goto exit_clear_intr;
+	}
+
+	INIT_LIST_HEAD(&read_req->list);
+	read_req->iosize = iosize;
+
+	list_add_tail(&read_req->list, &priv->read_req_list);
+
+	/* clear the function's interrupt request bit (write 1 to clear) */
+	sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
+
+	queue_work(priv->wq, &priv->isr_worker);
+
+	LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
+
+	return;
+
+exit_clear_intr:
+	/* clear the function's interrupt request bit (write 1 to clear) */
+	sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
+}
+
+
+static int blocks;
+module_param(blocks, int, 0604);
+MODULE_PARM_DESC(blocks, "max_blocks_to_send");
+
+static int dump;
+module_param(dump, bool, 0604);
+MODULE_PARM_DESC(dump, "dump_hex_content");
+
+static int jump = 1;
+module_param(jump, bool, 0604);
+
+static int direct = 1;
+module_param(direct, bool, 0604);
+
+static int checksum = 1;
+module_param(checksum, bool, 0604);
+
+static int fw_download = 1;
+module_param(fw_download, bool, 0604);
+
+static int block_size = IWMC_SDIO_BLK_SIZE;
+module_param(block_size, int, 0404);
+
+static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
+module_param(download_trans_blks, int, 0604);
+
+static int rubbish_barker;
+module_param(rubbish_barker, bool, 0604);
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+static int log_level[LOG_SRC_MAX];
+static unsigned int log_level_argc;
+module_param_array(log_level, int, &log_level_argc, 0604);
+MODULE_PARM_DESC(log_level, "log_level");
+
+static int log_level_fw[FW_LOG_SRC_MAX];
+static unsigned int log_level_fw_argc;
+module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
+MODULE_PARM_DESC(log_level_fw, "log_level_fw");
+#endif
+
+void iwmct_dbg_init_params(struct iwmct_priv *priv)
+{
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+	int i;
+
+	for (i = 0; i < log_level_argc; i++) {
+		dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
+						i, log_level[i]);
+		iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
+			       log_level[i] & 0xFF);
+	}
+	for (i = 0; i < log_level_fw_argc; i++) {
+		dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
+						i, log_level_fw[i]);
+		iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
+				  log_level_fw[i] & 0xFF);
+	}
+#endif
+
+	priv->dbg.blocks = blocks;
+	LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
+	priv->dbg.dump = (bool)dump;
+	LOG_INFO(priv, INIT, "dump=%d\n", dump);
+	priv->dbg.jump = (bool)jump;
+	LOG_INFO(priv, INIT, "jump=%d\n", jump);
+	priv->dbg.direct = (bool)direct;
+	LOG_INFO(priv, INIT, "direct=%d\n", direct);
+	priv->dbg.checksum = (bool)checksum;
+	LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
+	priv->dbg.fw_download = (bool)fw_download;
+	LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
+	priv->dbg.block_size = block_size;
+	LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
+	priv->dbg.download_trans_blks = download_trans_blks;
+	LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+static ssize_t show_iwmct_fw_version(struct device *d,
+				  struct device_attribute *attr, char *buf)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%s\n", priv->dbg.label_fw);
+}
+static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
+		   show_iwmct_log_level, store_iwmct_log_level);
+static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
+		   show_iwmct_log_level_fw, store_iwmct_log_level_fw);
+#endif
+
+static struct attribute *iwmct_sysfs_entries[] = {
+	&dev_attr_cc_label_fw.attr,
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+	&dev_attr_log_level.attr,
+	&dev_attr_log_level_fw.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group iwmct_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = iwmct_sysfs_entries,
+};
+
+
+static int iwmct_probe(struct sdio_func *func,
+			   const struct sdio_device_id *id)
+{
+	struct iwmct_priv *priv;
+	int ret;
+	int val = 1;
+	int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
+
+	dev_dbg(&func->dev, "enter iwmct_probe\n");
+
+	dev_dbg(&func->dev, "IRQ polling period is %u msecs, HZ is %d\n",
+		jiffies_to_msecs(2147483647), HZ);
+
+	priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&func->dev, "kzalloc error\n");
+		return -ENOMEM;
+	}
+	priv->func = func;
+	sdio_set_drvdata(func, priv);
+
+
+	/* create drivers work queue */
+	priv->wq = create_workqueue(DRV_NAME "_wq");
+	priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
+	INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
+	INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
+
+	init_waitqueue_head(&priv->wait_q);
+
+	sdio_claim_host(func);
+	/* FIXME: Remove after it is fixed in the Boot ROM upgrade */
+	func->enable_timeout = 10;
+
+	/* In our HW, setting the block size also wakes up the boot rom. */
+	ret = sdio_set_block_size(func, priv->dbg.block_size);
+	if (ret) {
+		LOG_ERROR(priv, INIT,
+			"sdio_set_block_size() failure: %d\n", ret);
+		goto error_sdio_enable;
+	}
+
+	ret = sdio_enable_func(func);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
+		goto error_sdio_enable;
+	}
+
+	/* init reset and dev_sync states */
+	atomic_set(&priv->reset, 0);
+	atomic_set(&priv->dev_sync, 0);
+
+	/* init read req queue */
+	INIT_LIST_HEAD(&priv->read_req_list);
+
+	/* process configurable parameters */
+	iwmct_dbg_init_params(priv);
+	ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "Failed to register attributes and "
+			 "initialize module_params\n");
+		goto error_dev_attrs;
+	}
+
+	iwmct_dbgfs_register(priv, DRV_NAME);
+
+	if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
+		LOG_INFO(priv, INIT,
+			 "Reducing transaction to 8 blocks = 2K (from %d)\n",
+			 priv->dbg.download_trans_blks);
+		priv->dbg.download_trans_blks = 8;
+	}
+	priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
+	LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
+
+	ret = sdio_claim_irq(func, iwmct_irq);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
+		goto error_claim_irq;
+	}
+
+
+	/* Enable function's interrupt */
+	sdio_writeb(priv->func, val, addr, &ret);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "Failure writing to "
+			  "Interrupt Enable Register (%d): %d\n", addr, ret);
+		goto error_enable_int;
+	}
+
+	sdio_release_host(func);
+
+	LOG_INFO(priv, INIT, "exit iwmct_probe\n");
+
+	return ret;
+
+error_enable_int:
+	sdio_release_irq(func);
+error_claim_irq:
+	sdio_disable_func(func);
+error_dev_attrs:
+	iwmct_dbgfs_unregister(priv->dbgfs);
+	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
+error_sdio_enable:
+	sdio_release_host(func);
+	return ret;
+}
+
+static void iwmct_remove(struct sdio_func *func)
+{
+	struct iwmct_work_struct *read_req;
+	struct iwmct_priv *priv = sdio_get_drvdata(func);
+
+	LOG_INFO(priv, INIT, "enter\n");
+
+	sdio_claim_host(func);
+	sdio_release_irq(func);
+	sdio_release_host(func);
+
+	/* Safely destroy the workqueues */
+	destroy_workqueue(priv->bus_rescan_wq);
+	destroy_workqueue(priv->wq);
+
+	sdio_claim_host(func);
+	sdio_disable_func(func);
+	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
+	iwmct_dbgfs_unregister(priv->dbgfs);
+	sdio_release_host(func);
+
+	/* free read requests */
+	while (!list_empty(&priv->read_req_list)) {
+		read_req = list_entry(priv->read_req_list.next,
+			struct iwmct_work_struct, list);
+
+		list_del(&read_req->list);
+		kfree(read_req);
+	}
+
+	kfree(priv);
+}
+
+
+static const struct sdio_device_id iwmct_ids[] = {
+	/* Intel Wireless MultiCom 3200 Top Driver */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
+	{ },	/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(sdio, iwmct_ids);
+
+static struct sdio_driver iwmct_driver = {
+	.probe		= iwmct_probe,
+	.remove		= iwmct_remove,
+	.name		= DRV_NAME,
+	.id_table	= iwmct_ids,
+};
+
+static int __init iwmct_init(void)
+{
+	int rc;
+
+	/* Default log filter settings */
+	iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
+	iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
+	iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
+
+	rc = sdio_register_driver(&iwmct_driver);
+
+	return rc;
+}
+
+static void __exit iwmct_exit(void)
+{
+	sdio_unregister_driver(&iwmct_driver);
+}
+
+module_init(iwmct_init);
+module_exit(iwmct_exit);
+
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index f60309175ef5..4d4cad393dce 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -249,11 +249,11 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
 	 *	for the Sager NP943 prefix.
 	 */
 
-	if (station_addr[0] == 0x02  &&  station_addr[1] == 0x60
-						&& station_addr[2] == 0x8c)
+	if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
+	    station_addr[2] == 0x8c)
 		mname = "3c501";
-	else if (station_addr[0] == 0x00  &&  station_addr[1] == 0x80
-						&& station_addr[2] == 0xC8)
+	else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
+		 station_addr[2] == 0xC8)
 		mname = "NP943";
 	else {
 		release_region(ioaddr, EL1_IO_EXTENT);
@@ -345,7 +345,7 @@ static int el_open(struct net_device *dev)
 	if (el_debug > 2)
 		pr_debug("%s: Doing el_open()...\n", dev->name);
 
-	retval = request_irq(dev->irq, &el_interrupt, 0, dev->name, dev);
+	retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
 	if (retval)
 		return retval;
 
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index c71e12d05f6e..66e0323c1839 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -214,8 +214,8 @@ el2_probe1(struct net_device *dev, int ioaddr)
     iobase_reg = inb(ioaddr+0x403);
     membase_reg = inb(ioaddr+0x404);
     /* ASIC location registers should be 0 or have only a single bit set. */
-    if (   (iobase_reg  & (iobase_reg - 1))
-	|| (membase_reg & (membase_reg - 1))) {
+    if ((iobase_reg  & (iobase_reg - 1)) ||
+	(membase_reg & (membase_reg - 1))) {
 	retval = -ENODEV;
 	goto out1;
     }
@@ -291,8 +291,8 @@ el2_probe1(struct net_device *dev, int ioaddr)
 	    writel(0xba5eba5e, mem_base);
 	    for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
 		writel(test_val, mem_base + i);
-		if (readl(mem_base) != 0xba5eba5e
-		    || readl(mem_base + i) != test_val) {
+		if (readl(mem_base) != 0xba5eba5e ||
+		    readl(mem_base + i) != test_val) {
 		    pr_warning("3c503: memory failure or memory address conflict.\n");
 		    dev->mem_start = 0;
 		    ei_status.name = "3c503-PIO";
@@ -397,9 +397,10 @@ el2_open(struct net_device *dev)
 		unsigned long cookie = probe_irq_on();
 		outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
 		outb_p(0x00, E33G_IDCFR);
-		if (*irqp == probe_irq_off(cookie)	/* It's a good IRQ line! */
-		    && ((retval = request_irq(dev->irq = *irqp,
-		    eip_interrupt, 0, dev->name, dev)) == 0))
+		if (*irqp == probe_irq_off(cookie) &&	/* It's a good IRQ line! */
+		    ((retval = request_irq(dev->irq = *irqp,
+					   eip_interrupt, 0,
+					   dev->name, dev)) == 0))
 		    break;
 	    } else {
 		    if (retval != -EBUSY)
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index a21c9d15ef8a..9257d7ce0378 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -886,7 +886,7 @@ static int elp_open(struct net_device *dev)
 	/*
 	 * install our interrupt service routine
 	 */
-	if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) {
+	if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) {
 		pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
 		return retval;
 	}
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index a6dc8bcbc7df..fbc231153e55 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -399,7 +399,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
 
 	irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
 
-	irqval = request_irq(irq, &el16_interrupt, 0, DRV_NAME, dev);
+	irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev);
 	if (irqval) {
 		pr_cont("\n");
 		pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
@@ -836,8 +836,8 @@ static void el16_rx(struct net_device *dev)
 		void __iomem *data_frame = lp->base + data_buffer_addr;
 		ushort pkt_len = readw(data_frame);
 
-		if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
-			|| (pkt_len & 0xC000) != 0xC000) {
+		if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 ||
+		    (pkt_len & 0xC000) != 0xC000) {
 			pr_err("%s: Rx frame at %#x corrupted, "
 			       "status %04x cmd %04x next %04x "
 			       "data-buf @%04x %04x.\n",
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 3b00a4e927aa..9d85efce5916 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -253,9 +253,9 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
 		   This check is needed in order not to register them twice. */
 		for (i = 0; i < el3_cards; i++) {
 			struct el3_private *lp = netdev_priv(el3_devs[i]);
-			if (lp->type == EL3_PNP
-			    && !memcmp(phys_addr, el3_devs[i]->dev_addr,
-				       ETH_ALEN)) {
+			if (lp->type == EL3_PNP &&
+			    !memcmp(phys_addr, el3_devs[i]->dev_addr,
+				    ETH_ALEN)) {
 				if (el3_debug > 3)
 					pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
 						phys_addr[0] & 0xff, phys_addr[0] >> 8,
@@ -780,7 +780,7 @@ el3_open(struct net_device *dev)
 	outw(RxReset, ioaddr + EL3_CMD);
 	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
 
-	i = request_irq(dev->irq, &el3_interrupt, 0, dev->name, dev);
+	i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev);
 	if (i)
 		return i;
 
@@ -835,8 +835,8 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #ifndef final_version
 	{	/* Error-checking code, delete someday. */
 		ushort status = inw(ioaddr + EL3_STATUS);
-		if (status & 0x0001 		/* IRQ line active, missed one. */
-			&& inw(ioaddr + EL3_STATUS) & 1) { 			/* Make sure. */
+		if (status & 0x0001 && 		/* IRQ line active, missed one. */
+		    inw(ioaddr + EL3_STATUS) & 1) { 			/* Make sure. */
 			pr_debug("%s: Missed interrupt, status then %04x now %04x"
 				   "  Tx %2.2x Rx %4.4x.\n", dev->name, status,
 				   inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4adcb950f5f1..063b049ffe55 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -764,13 +764,14 @@ static int corkscrew_open(struct net_device *dev)
 	/* Use the now-standard shared IRQ implementation. */
 	if (vp->capabilities == 0x11c7) {
 		/* Corkscrew: Cannot share ISA resources. */
-		if (dev->irq == 0
-		    || dev->dma == 0
-		    || request_irq(dev->irq, &corkscrew_interrupt, 0,
-				   vp->product_name, dev)) return -EAGAIN;
+		if (dev->irq == 0 ||
+		    dev->dma == 0 ||
+		    request_irq(dev->irq, corkscrew_interrupt, 0,
+				vp->product_name, dev))
+			return -EAGAIN;
 		enable_dma(dev->dma);
 		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
-	} else if (request_irq(dev->irq, &corkscrew_interrupt, IRQF_SHARED,
+	} else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED,
 			       vp->product_name, dev)) {
 		return -EAGAIN;
 	}
@@ -1368,8 +1369,8 @@ static int boomerang_rx(struct net_device *dev)
 
 			/* Check if the packet is long enough to just accept without
 			   copying to a properly sized skbuff. */
-			if (pkt_len < rx_copybreak
-			    && (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
+			if (pkt_len < rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index cb0b730799ba..27d80ca5e4c0 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -288,7 +288,7 @@ static int elmc_open(struct net_device *dev)
 
 	elmc_id_attn586();	/* disable interrupts */
 
-	ret = request_irq(dev->irq, &elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+	ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM,
 			  dev->name, dev);
 	if (ret) {
 		pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6021e6dded8f..36c4191e7bca 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -443,7 +443,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
 	 *	Grab the IRQ
 	 */
 
-	err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
+	err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
 	if (err) {
 		release_region(dev->base_addr, MC32_IO_EXTENT);
 		pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
@@ -1168,8 +1168,8 @@ static void mc32_rx_ring(struct net_device *dev)
 
 			/* Try to save time by avoiding a copy on big frames */
 
-			if ((length > RX_COPYBREAK)
-			    && ((newskb=dev_alloc_skb(1532)) != NULL))
+			if ((length > RX_COPYBREAK) &&
+			    ((newskb=dev_alloc_skb(1532)) != NULL))
 			{
 				skb=lp->rx_ring[rx_ring_tail].skb;
 				skb_put(skb, length);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 975e25b19ebe..78b7167a8ce3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1942,8 +1942,8 @@ vortex_error(struct net_device *dev, int status)
 	if (status & TxComplete) {			/* Really "TxError" for us. */
 		tx_status = ioread8(ioaddr + TxStatus);
 		/* Presumably a tx-timeout. We must merely re-enable. */
-		if (vortex_debug > 2
-			|| (tx_status != 0x88 && vortex_debug > 0)) {
+		if (vortex_debug > 2 ||
+		    (tx_status != 0x88 && vortex_debug > 0)) {
 			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
 				   dev->name, tx_status);
 			if (tx_status == 0x82) {
@@ -2560,7 +2560,7 @@ boomerang_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		entry = vp->dirty_rx % RX_RING_SIZE;
 		if (vp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 			if (skb == NULL) {
 				static unsigned long last_jif;
 				if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2572,7 +2572,6 @@ boomerang_rx(struct net_device *dev)
 				break;			/* Bad news!  */
 			}
 
-			skb_reserve(skb, NET_IP_ALIGN);
 			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 			vp->rx_skbuff[entry] = skb;
 		}
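The 3c59x hunk above is the first of several that replace a netdev_alloc_skb() + skb_reserve(NET_IP_ALIGN) pair with netdev_alloc_skb_ip_align(). At the time of this merge the helper is roughly the following inline from include/linux/skbuff.h, so the conversions are behaviour-preserving:

	static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
								 unsigned int length)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

		if (NET_IP_ALIGN && skb)
			skb_reserve(skb, NET_IP_ALIGN);	/* keep the IP header aligned */
		return skb;
	}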
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 83a1922e68e0..3f452bcbfb9e 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -549,14 +549,12 @@ rx_status_loop:
 			pr_debug("%s: rx slot %d status 0x%x len %d\n",
 			       dev->name, rx_tail, status, len);
 
-		new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
+		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
 		if (!new_skb) {
 			dev->stats.rx_dropped++;
 			goto rx_next;
 		}
 
-		skb_reserve(new_skb, NET_IP_ALIGN);
-
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
@@ -911,8 +909,8 @@ static void __cp_set_rx_mode (struct net_device *dev)
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to filter perfectly -- accept all multicasts. */
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
@@ -1057,12 +1055,10 @@ static int cp_refill_rx(struct cp_private *cp)
 		struct sk_buff *skb;
 		dma_addr_t mapping;
 
-		skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
 		if (!skb)
 			goto err_out;
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i] = skb;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4a3628755026..25f7339daabd 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1549,8 +1549,8 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
 	mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
 
 	if (!tp->mii.force_media && mii_lpa != 0xffff) {
-		int duplex = (mii_lpa & LPA_100FULL)
-		    || (mii_lpa & 0x01C0) == 0x0040;
+		int duplex = ((mii_lpa & LPA_100FULL) ||
+			      (mii_lpa & 0x01C0) == 0x0040);
 		if (tp->mii.full_duplex != duplex) {
 			tp->mii.full_duplex = duplex;
 
@@ -1936,8 +1936,8 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
 		 RTL_R16 (RxBufAddr),
 		 RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
 
-	while (netif_running(dev) && received < budget
-	       && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
+	while (netif_running(dev) && received < budget &&
+	       (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
 		u32 ring_offset = cur_rx % RX_BUF_LEN;
 		u32 rx_status;
 		unsigned int pkt_size;
@@ -2004,9 +2004,8 @@ no_early_rx:
 		/* Malloc up new buffer, compatible with net-2e. */
 		/* Omit the four octet CRC from the length. */
 
-		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 		if (likely(skb)) {
-			skb_reserve (skb, NET_IP_ALIGN);	/* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
@@ -2522,8 +2521,8 @@ static void __set_rx_mode (struct net_device *dev)
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to filter perfectly -- accept all multicasts. */
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b2f71f79baaf..0bbd5ae49862 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1001,7 +1001,7 @@ config SMC911X
 
 config SMSC911X
 	tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-	depends on ARM || SUPERH || BLACKFIN
+	depends on ARM || SUPERH || BLACKFIN || MIPS
 	select CRC32
 	select MII
 	select PHYLIB
@@ -3235,7 +3235,7 @@ config VIRTIO_NET
 
 config VMXNET3
        tristate "VMware VMXNET3 ethernet driver"
-       depends on PCI && X86 && INET
+       depends on PCI && INET
        help
          This driver supports VMware's vmxnet3 virtual ethernet NIC.
          To compile this driver as a module, choose M here: the
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index b5dc7f550725..50cecf417471 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -328,7 +328,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
 
 	/* Reserve any actual interrupt. */
 	if (dev->irq) {
-		retval = request_irq(dev->irq, &cops_interrupt, 0, dev->name, dev);
+		retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
 		if (retval)
 			goto err_out;
 	}
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index aaf14d306a4a..eb0448b03f41 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -230,9 +230,9 @@ static int ipddp_delete(struct ipddp_route *rt)
 	spin_lock_bh(&ipddp_route_lock);
         while((tmp = *r) != NULL)
         {
-                if(tmp->ip == rt->ip
-                        && tmp->at.s_net == rt->at.s_net
-                        && tmp->at.s_node == rt->at.s_node)
+                if(tmp->ip == rt->ip &&
+		   tmp->at.s_net == rt->at.s_net &&
+		   tmp->at.s_node == rt->at.s_node)
                 {
                         *r = tmp->next;
 			spin_unlock_bh(&ipddp_route_lock);
@@ -255,9 +255,9 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
 
         for(f = ipddp_route_list; f != NULL; f = f->next)
         {
-                if(f->ip == rt->ip
-                        && f->at.s_net == rt->at.s_net
-                        && f->at.s_node == rt->at.s_node)
+                if(f->ip == rt->ip &&
+		   f->at.s_net == rt->at.s_net &&
+		   f->at.s_node == rt->at.s_node)
                         return (f);
         }
 
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 08760baece7a..dbfbd3b7ff86 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1158,7 +1158,7 @@ struct net_device * __init ltpc_probe(void)
 	}
 
 	/* grab it and don't let go :-) */
-	if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0)
+	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
 	{
 		(void) inb_p(io+7);  /* enable interrupts from board */
 		(void) inb_p(io+7);  /* and reset irq line */
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index e3082a9350fc..e6afab2455b1 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -156,7 +156,7 @@ static int __init arcrimi_found(struct net_device *dev)
 	}
 
 	/* reserve the irq */
-	if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
+	if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
 		iounmap(p);
 		release_mem_region(dev->mem_start, MIRROR_SIZE);
 		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
@@ -174,9 +174,9 @@ static int __init arcrimi_found(struct net_device *dev)
 	 * 2k (or there are no mirrors at all) but on some, it's 4k.
 	 */
 	mirror_size = MIRROR_SIZE;
-	if (readb(p) == TESTvalue
-	    && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0
-	    && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
+	if (readb(p) == TESTvalue &&
+	    check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 &&
+	    check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
 		mirror_size = 2 * MIRROR_SIZE;
 
 	first_mirror = shmem - mirror_size;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 75a572560909..d8f029303754 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -301,8 +301,8 @@ static int choose_mtu(void)
 
 	/* choose the smallest MTU of all available encaps */
 	for (count = 0; count < 256; count++) {
-		if (arc_proto_map[count] != &arc_proto_null
-		    && arc_proto_map[count]->mtu < mtu) {
+		if (arc_proto_map[count] != &arc_proto_null &&
+		    arc_proto_map[count]->mtu < mtu) {
 			mtu = arc_proto_map[count]->mtu;
 		}
 	}
@@ -953,13 +953,13 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 				 *    > RECON_THRESHOLD/min;
 				 * then print a warning message.
 				 */
-				if (!lp->network_down
-				    && (lp->last_recon - lp->first_recon) <= HZ * 60
-				  && lp->num_recons >= RECON_THRESHOLD) {
+				if (!lp->network_down &&
+				    (lp->last_recon - lp->first_recon) <= HZ * 60 &&
+				    lp->num_recons >= RECON_THRESHOLD) {
 					lp->network_down = 1;
 					BUGMSG(D_NORMAL, "many reconfigurations detected: cabling problem?\n");
-				} else if (!lp->network_down
-					   && lp->last_recon - lp->first_recon > HZ * 60) {
+				} else if (!lp->network_down &&
+					   lp->last_recon - lp->first_recon > HZ * 60) {
 					/* reset counters if we've gone for over a minute. */
 					lp->first_recon = lp->last_recon;
 					lp->num_recons = 1;
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 651275a5f3d2..0a74f21409c5 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -200,7 +200,7 @@ int com20020_found(struct net_device *dev, int shared)
 	outb(dev->dev_addr[0], _XREG);
 
 	/* reserve the irq */
-	if (request_irq(dev->irq, &arcnet_interrupt, shared,
+	if (request_irq(dev->irq, arcnet_interrupt, shared,
 			"arcnet (COM20020)", dev)) {
 		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
 		return -ENODEV;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 89de29b3b1dc..28dea518d554 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -238,7 +238,7 @@ static int __init com90io_found(struct net_device *dev)
 	int err;
 
 	/* Reserve the irq */
-	if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
+	if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
 		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
 		return -ENODEV;
 	}
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index d762fe46251e..112e230cb13d 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -501,7 +501,7 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem
 		goto err_free_dev;
 
 	/* reserve the irq */
-	if (request_irq(airq, &arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
+	if (request_irq(airq, arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
 		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
 		goto err_release_mem;
 	}
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 2a7b7745cc55..be256b34cea8 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -35,11 +35,13 @@
 
 #include <mach/regs-switch.h>
 #include <mach/regs-misc.h>
+#include <asm/mach/irq.h>
+#include <mach/regs-irq.h>
 
 #include "ks8695net.h"
 
 #define MODULENAME	"ks8695_ether"
-#define MODULEVERSION	"1.01"
+#define MODULEVERSION	"1.02"
 
 /*
  * Transmit and device reset timeout, default 5 seconds.
@@ -95,6 +97,9 @@ struct ks8695_skbuff {
 #define MAX_RX_DESC 16
 #define MAX_RX_DESC_MASK 0xf
 
+/* NAPI weight should be larger than the number of RX DMA buffers */
+#define NAPI_WEIGHT   64
+
 #define MAX_RXBUF_SIZE 0x700
 
 #define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
@@ -120,6 +125,7 @@ enum ks8695_dtype {
  *	@dev: The platform device object for this interface
  *	@dtype: The type of this device
  *	@io_regs: The ioremapped registers for this interface
+ *	@napi: NAPI context used for Rx processing
  *	@rx_irq_name: The textual name of the RX IRQ from the platform data
  *	@tx_irq_name: The textual name of the TX IRQ from the platform data
  *	@link_irq_name: The textual name of the link IRQ from the
@@ -143,6 +149,7 @@ enum ks8695_dtype {
  *	@rx_ring_dma: The DMA mapped equivalent of rx_ring
  *	@rx_buffers: The sk_buff mappings for the RX ring
  *	@next_rx_desc_read: The next RX descriptor to read from on IRQ
+ *	@rx_lock: Lock protecting the Rx IRQ handler and NAPI poll
  *	@msg_enable: The flags for which messages to emit
  */
 struct ks8695_priv {
@@ -152,6 +159,8 @@ struct ks8695_priv {
 	enum ks8695_dtype dtype;
 	void __iomem *io_regs;
 
+	struct napi_struct	napi;
+
 	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
 	int rx_irq, tx_irq, link_irq;
 
@@ -172,6 +181,7 @@ struct ks8695_priv {
 	dma_addr_t rx_ring_dma;
 	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
 	int next_rx_desc_read;
+	spinlock_t rx_lock;
 
 	int msg_enable;
 };
@@ -392,29 +402,74 @@ ks8695_tx_irq(int irq, void *dev_id)
 }
 
 /**
+ *	ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
+ *	@ksp: Private data for the KS8695 Ethernet
+ *
+ *    Per the KS8695 documentation:
+ *    Interrupt Enable Register (offset 0xE204)
+ *        Bit29 : WAN MAC Receive Interrupt Enable
+ *        Bit16 : LAN MAC Receive Interrupt Enable
+ *    Interrupt Status Register (Offset 0xF208)
+ *        Bit29: WAN MAC Receive Status
+ *        Bit16: LAN MAC Receive Status
+ *    So the Rx interrupt enable/status bit number is the same
+ *    as the Rx IRQ number.
+ */
+static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
+{
+	return ksp->rx_irq;
+}
+
+/**
  *	ks8695_rx_irq - Receive IRQ handler
  *	@irq: The IRQ which went off (ignored)
  *	@dev_id: The net_device for the interrupt
  *
- *	Process the RX ring, passing any received packets up to the
- *	host.  If we received anything other than errors, we then
- *	refill the ring.
+ *	Inform NAPI that packet reception needs to be scheduled
  */
+
 static irqreturn_t
 ks8695_rx_irq(int irq, void *dev_id)
 {
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct ks8695_priv *ksp = netdev_priv(ndev);
+
+	spin_lock(&ksp->rx_lock);
+
+	if (napi_schedule_prep(&ksp->napi)) {
+		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
+		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+		/*disable rx interrupt*/
+		status &= ~mask_bit;
+		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
+		__napi_schedule(&ksp->napi);
+	}
+
+	spin_unlock(&ksp->rx_lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	ks8695_rx - Receive packets, called from the NAPI poll method
+ *	@ksp: Private data for the KS8695 Ethernet
+ *	@budget: Maximum number of packets to receive
+ */
+
+static int ks8695_rx(struct ks8695_priv *ksp, int budget)
+{
+	struct net_device *ndev = ksp->ndev;
 	struct sk_buff *skb;
 	int buff_n;
 	u32 flags;
 	int pktlen;
 	int last_rx_processed = -1;
+	int received = 0;
 
 	buff_n = ksp->next_rx_desc_read;
-	do {
-		if (ksp->rx_buffers[buff_n].skb &&
-		    !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) {
+	while (received < budget &&
+	       ksp->rx_buffers[buff_n].skb &&
+	       !(ksp->rx_ring[buff_n].status &
+		 cpu_to_le32(RDES_OWN))) {
 			rmb();
 			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
 			/* Found an SKB which we own, this means we
@@ -464,7 +519,7 @@ ks8695_rx_irq(int irq, void *dev_id)
 			/* Relinquish the SKB to the network layer */
 			skb_put(skb, pktlen);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 
 			/* Record stats */
 			ndev->stats.rx_packets++;
@@ -478,29 +533,55 @@ rx_failure:
 			/* Give the ring entry back to the hardware */
 			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
 rx_finished:
+			received++;
 			/* And note this as processed so we can start
 			 * from here next time
 			 */
 			last_rx_processed = buff_n;
-		} else {
-			/* Ran out of things to process, stop now */
-			break;
-		}
-		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
-	} while (buff_n != ksp->next_rx_desc_read);
-
-	/* And note which RX descriptor we last did anything with */
-	if (likely(last_rx_processed != -1))
-		ksp->next_rx_desc_read =
-			(last_rx_processed + 1) & MAX_RX_DESC_MASK;
-
+			buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
+			/* And note which RX descriptor we last processed */
+			if (likely(last_rx_processed != -1))
+				ksp->next_rx_desc_read =
+					(last_rx_processed + 1) &
+					MAX_RX_DESC_MASK;
+	}
 	/* And refill the buffers */
 	ks8695_refill_rxbuffers(ksp);
 
-	/* Kick the RX DMA engine, in case it became suspended */
+	/* Kick the RX DMA engine, in case it became
+	 * suspended */
 	ks8695_writereg(ksp, KS8695_DRSC, 0);
+	return received;
+}
 
-	return IRQ_HANDLED;
+
+/**
+ *	ks8695_poll - Receive packets via the NAPI poll method
+ *	@napi: NAPI context for this interface
+ *	@budget: Maximum number of packets the network core will accept
+ *
+ *	Invoked by the network core when it wants the driver to
+ *	deliver more received packets
+ */
+static int ks8695_poll(struct napi_struct *napi, int budget)
+{
+	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
+	unsigned long  work_done;
+
+	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
+	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+
+	work_done = ks8695_rx(ksp, budget);
+
+	if (work_done < budget) {
+		unsigned long flags;
+		spin_lock_irqsave(&ksp->rx_lock, flags);
+		/*enable rx interrupt*/
+		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
+		__napi_complete(napi);
+		spin_unlock_irqrestore(&ksp->rx_lock, flags);
+	}
+	return work_done;
 }
 
 /**
@@ -1253,6 +1334,7 @@ ks8695_stop(struct net_device *ndev)
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 
 	netif_stop_queue(ndev);
+	napi_disable(&ksp->napi);
 	netif_carrier_off(ndev);
 
 	ks8695_shutdown(ksp);
@@ -1287,6 +1369,7 @@ ks8695_open(struct net_device *ndev)
 		return ret;
 	}
 
+	napi_enable(&ksp->napi);
 	netif_start_queue(ndev);
 
 	return 0;
@@ -1472,6 +1555,8 @@ ks8695_probe(struct platform_device *pdev)
 	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);
 
+	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
+
 	/* Retrieve the default MAC addr from the chip. */
 	/* The bootloader should have left it in there for us. */
 
@@ -1505,6 +1590,7 @@ ks8695_probe(struct platform_device *pdev)
 
 	/* And initialise the queue's lock */
 	spin_lock_init(&ksp->txq_lock);
+	spin_lock_init(&ksp->rx_lock);
 
 	/* Specify the RX DMA ring buffer */
 	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
@@ -1626,6 +1712,7 @@ ks8695_drv_remove(struct platform_device *pdev)
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 
 	platform_set_drvdata(pdev, NULL);
+	netif_napi_del(&ksp->napi);
 
 	unregister_netdev(ndev);
 	ks8695_release_device(ksp);
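The ks8695 changes above are a standard NAPI conversion: the Rx IRQ handler masks the Rx interrupt and schedules the poll routine, the poll routine receives at most budget frames via netif_receive_skb(), and only when it finishes early does it complete NAPI and unmask the interrupt. A stripped-down sketch of that control flow (the foo_* names and the mask/unmask helpers are placeholders, locking omitted):

	static irqreturn_t foo_rx_irq(int irq, void *dev_id)
	{
		struct net_device *ndev = dev_id;
		struct foo_priv *priv = netdev_priv(ndev);

		if (napi_schedule_prep(&priv->napi)) {
			foo_mask_rx_irq(priv);		/* hardware specific */
			__napi_schedule(&priv->napi);
		}
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work_done = foo_rx(priv, budget);	/* up to 'budget' frames */

		if (work_done < budget) {
			napi_complete(napi);
			foo_unmask_rx_irq(priv);	/* hardware specific */
		}
		return work_done;
	}

The poll routine is registered with netif_napi_add() in probe and enabled/disabled from open/stop, as the hunks above do for ks8695.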
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 25e2627eb118..b7f3866d546f 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -160,8 +160,8 @@ struct  w90p910_ether {
 	struct mii_if_info mii;
 	struct timer_list check_timer;
 	void __iomem *reg;
-	unsigned int rxirq;
-	unsigned int txirq;
+	int rxirq;
+	int txirq;
 	unsigned int cur_tx;
 	unsigned int cur_rx;
 	unsigned int finish_tx;
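Making rxirq/txirq signed matches how they are presumably obtained: platform_get_irq() returns an int and reports failure as a negative errno, which an unsigned field would silently turn into a huge positive IRQ number. A hedged sketch of the check this enables (field and resource index are illustrative):

	ether->rxirq = platform_get_irq(pdev, 0);
	if (ether->rxirq < 0)	/* undetectable if rxirq were unsigned */
		return -ENXIO;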
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 544d5af6950e..b14f4799d5d1 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -350,13 +350,13 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr)
 	slot = -1;
 	/* We must check for the EEPROM-config boards first, else accessing
 	   IOCONFIG0 will move the board! */
-	if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr
-		&& read_eeprom(ioaddr, 4) == 0x0000
-		&& (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
+	if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
+	    read_eeprom(ioaddr, 4) == 0x0000 &&
+	    (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
 		is_at1700 = 1;
-	else if (inb(ioaddr   + SAPROM    ) == 0x00
-		&& inb(ioaddr + SAPROM + 1) == 0x00
-		&& inb(ioaddr + SAPROM + 2) == 0x0e)
+	else if (inb(ioaddr + SAPROM    ) == 0x00 &&
+		 inb(ioaddr + SAPROM + 1) == 0x00 &&
+		 inb(ioaddr + SAPROM + 2) == 0x0e)
 		is_fmv18x = 1;
 	else {
 		goto err_out;
@@ -468,7 +468,7 @@ found:
 	lp->jumpered = is_fmv18x;
 	lp->mca_slot = slot;
 	/* Snarf the interrupt vector now. */
-	ret = request_irq(irq, &net_interrupt, 0, DRV_NAME, dev);
+	ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
 	if (ret) {
 		printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
 		       "conflict on IRQ %d.\n",
@@ -839,8 +839,8 @@ set_rx_mode(struct net_device *dev)
 	if (dev->flags & IFF_PROMISC) {
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		outb(3, ioaddr + RX_MODE);	/* Enable promiscuous mode */
-	} else if (dev->mc_count > MC_FILTERBREAK
-			   ||  (dev->flags & IFF_ALLMULTI)) {
+	} else if (dev->mc_count > MC_FILTERBREAK ||
+			   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to filter perfectly -- accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		outb(2, ioaddr + RX_MODE);	/* Use normal mode. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 0c0deceb6938..c5721cb38265 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -930,8 +930,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
 			}
 #endif
 
-			if (lp->tx_full && (netif_queue_stopped(dev))
-				&& dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+			if (lp->tx_full && (netif_queue_stopped(dev)) &&
+				dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
 				/* The ring is no longer full, clear tbusy. */
 				lp->tx_full = 0;
 				netif_wake_queue (dev);
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 2a1120ad2e74..a348a22551d9 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -470,12 +470,28 @@ struct atl1c_ring_header {
 struct atl1c_buffer {
 	struct sk_buff *skb;	/* socket buffer */
 	u16 length;		/* rx buffer length */
-	u16 state;		/* state of buffer */
-#define ATL1_BUFFER_FREE	0
-#define ATL1_BUFFER_BUSY	1
+	u16 flags;		/* buffer state and DMA mapping type */
+#define ATL1C_BUFFER_FREE		0x0001
+#define ATL1C_BUFFER_BUSY		0x0002
+#define ATL1C_BUFFER_STATE_MASK		0x0003
+
+#define ATL1C_PCIMAP_SINGLE		0x0004
+#define ATL1C_PCIMAP_PAGE		0x0008
+#define ATL1C_PCIMAP_TYPE_MASK		0x000C
+
 	dma_addr_t dma;
 };
 
+#define ATL1C_SET_BUFFER_STATE(buff, state) do {	\
+	((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK;	\
+	((buff)->flags) |= (state);			\
+	} while (0)
+
+#define ATL1C_SET_PCIMAP_TYPE(buff, type) do {		\
+	((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK;	\
+	((buff)->flags) |= (type);			\
+	} while (0)
+
 /* transmit packet descriptor (tpd) ring */
 struct atl1c_tpd_ring {
 	void *desc;		/* descriptor ring virtual address */
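The atl1c header change replaces the single buffer state word with a flags field that also records how the buffer was DMA-mapped, so one teardown helper can pick the matching unmap call. Condensed from the atl1c_main.c hunks that follow (not a complete function):

	/* when mapping a linear buffer */
	buffer_info->dma = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);
	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
	ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);

	/* in atl1c_clean_buffer(), the same bits select the unmap routine */
	if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
		pci_unmap_single(pdev, buffer_info->dma,
				 buffer_info->length, PCI_DMA_TODEVICE);
	else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
		pci_unmap_page(pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);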
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1372e9a99f5b..1e2f57d4c367 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -710,6 +710,29 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
 	return 0;
 }
 
+static inline void atl1c_clean_buffer(struct pci_dev *pdev,
+				struct atl1c_buffer *buffer_info, int in_irq)
+{
+	if (buffer_info->flags & ATL1C_BUFFER_FREE)
+		return;
+	if (buffer_info->dma) {
+		if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
+			pci_unmap_single(pdev, buffer_info->dma,
+					buffer_info->length, PCI_DMA_TODEVICE);
+		else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
+			pci_unmap_page(pdev, buffer_info->dma,
+					buffer_info->length, PCI_DMA_TODEVICE);
+	}
+	if (buffer_info->skb) {
+		if (in_irq)
+			dev_kfree_skb_irq(buffer_info->skb);
+		else
+			dev_kfree_skb(buffer_info->skb);
+	}
+	buffer_info->dma = 0;
+	buffer_info->skb = NULL;
+	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+}
 /*
  * atl1c_clean_tx_ring - Free Tx-skb
  * @adapter: board private structure
@@ -725,22 +748,12 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
 	ring_count = tpd_ring->count;
 	for (index = 0; index < ring_count; index++) {
 		buffer_info = &tpd_ring->buffer_info[index];
-		if (buffer_info->state == ATL1_BUFFER_FREE)
-			continue;
-		if (buffer_info->dma)
-			pci_unmap_single(pdev, buffer_info->dma,
-					buffer_info->length,
-					PCI_DMA_TODEVICE);
-		if (buffer_info->skb)
-			dev_kfree_skb(buffer_info->skb);
-		buffer_info->dma = 0;
-		buffer_info->skb = NULL;
-		buffer_info->state = ATL1_BUFFER_FREE;
+		atl1c_clean_buffer(pdev, buffer_info, 0);
 	}
 
 	/* Zero out Tx-buffers */
 	memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
-				ring_count);
+		ring_count);
 	atomic_set(&tpd_ring->next_to_clean, 0);
 	tpd_ring->next_to_use = 0;
 }
@@ -760,16 +773,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		for (j = 0; j < rfd_ring[i].count; j++) {
 			buffer_info = &rfd_ring[i].buffer_info[j];
-			if (buffer_info->state == ATL1_BUFFER_FREE)
-				continue;
-			if (buffer_info->dma)
-				pci_unmap_single(pdev, buffer_info->dma,
-						buffer_info->length,
-						PCI_DMA_FROMDEVICE);
-			if (buffer_info->skb)
-				dev_kfree_skb(buffer_info->skb);
-			buffer_info->state = ATL1_BUFFER_FREE;
-			buffer_info->skb = NULL;
+			atl1c_clean_buffer(pdev, buffer_info, 0);
 		}
 		/* zero out the descriptor ring */
 		memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
@@ -796,7 +800,8 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
 		atomic_set(&tpd_ring[i].next_to_clean, 0);
 		buffer_info = tpd_ring[i].buffer_info;
 		for (j = 0; j < tpd_ring->count; j++)
-			buffer_info[i].state = ATL1_BUFFER_FREE;
+			ATL1C_SET_BUFFER_STATE(&buffer_info[i],
+					ATL1C_BUFFER_FREE);
 	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rfd_ring[i].next_to_use = 0;
@@ -805,7 +810,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
 		rrd_ring[i].next_to_clean = 0;
 		for (j = 0; j < rfd_ring[i].count; j++) {
 			buffer_info = &rfd_ring[i].buffer_info[j];
-			buffer_info->state = ATL1_BUFFER_FREE;
+			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 		}
 	}
 }
@@ -1447,6 +1452,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
 	struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
 				&adapter->tpd_ring[type];
 	struct atl1c_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
 	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	u16 hw_next_to_clean;
 	u16 shift;
@@ -1462,16 +1468,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
 
 	while (next_to_clean != hw_next_to_clean) {
 		buffer_info = &tpd_ring->buffer_info[next_to_clean];
-		if (buffer_info->state == ATL1_BUFFER_BUSY) {
-			pci_unmap_page(adapter->pdev, buffer_info->dma,
-					buffer_info->length, PCI_DMA_TODEVICE);
-			buffer_info->dma = 0;
-			if (buffer_info->skb) {
-				dev_kfree_skb_irq(buffer_info->skb);
-				buffer_info->skb = NULL;
-			}
-			buffer_info->state = ATL1_BUFFER_FREE;
-		}
+		atl1c_clean_buffer(pdev, buffer_info, 1);
 		if (++next_to_clean == tpd_ring->count)
 			next_to_clean = 0;
 		atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1587,7 +1584,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
 	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
 	next_info = &rfd_ring->buffer_info[next_next];
 
-	while (next_info->state == ATL1_BUFFER_FREE) {
+	while (next_info->flags & ATL1C_BUFFER_FREE) {
 		rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
 
 		skb = dev_alloc_skb(adapter->rx_buffer_len);
@@ -1603,12 +1600,13 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
 		 * the 14 byte MAC header is removed
 		 */
 		vir_addr = skb->data;
-		buffer_info->state = ATL1_BUFFER_BUSY;
+		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 		buffer_info->dma = pci_map_single(pdev, vir_addr,
 						buffer_info->length,
 						PCI_DMA_FROMDEVICE);
+		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
 		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 		rfd_next_to_use = next_next;
 		if (++next_next == rfd_ring->count)
@@ -1653,7 +1651,8 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
 			RRS_RX_RFD_INDEX_MASK;
 	for (i = 0; i < num; i++) {
 		buffer_info[rfd_index].skb = NULL;
-		buffer_info[rfd_index].state = ATL1_BUFFER_FREE;
+		ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
+					ATL1C_BUFFER_FREE);
 		if (++rfd_index == rfd_ring->count)
 			rfd_index = 0;
 	}
@@ -1967,7 +1966,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 		buffer_info->length = map_len;
 		buffer_info->dma = pci_map_single(adapter->pdev,
 					skb->data, hdr_len, PCI_DMA_TODEVICE);
-		buffer_info->state = ATL1_BUFFER_BUSY;
+		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
 		mapped_len += map_len;
 		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
 		use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
@@ -1981,16 +1981,14 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 		else {
 			use_tpd = atl1c_get_tpd(adapter, type);
 			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
-			use_tpd = atl1c_get_tpd(adapter, type);
-			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
 		}
 		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
 		buffer_info->length = buf_len - mapped_len;
 		buffer_info->dma =
 			pci_map_single(adapter->pdev, skb->data + mapped_len,
 					buffer_info->length, PCI_DMA_TODEVICE);
-		buffer_info->state = ATL1_BUFFER_BUSY;
-
+		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
 		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
 		use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
 	}
@@ -2010,8 +2008,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 					frag->page_offset,
 					buffer_info->length,
 					PCI_DMA_TODEVICE);
-		buffer_info->state = ATL1_BUFFER_BUSY;
-
+		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE);
 		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
 		use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
 	}
@@ -2137,7 +2135,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
-	err = request_irq(adapter->pdev->irq, &atl1c_intr, flags,
+	err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
 			netdev->name, netdev);
 	if (err) {
 		if (netif_msg_ifup(adapter))
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 60edb9f232bb..a76006c1bc6b 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -131,11 +131,6 @@ static int atl1e_set_settings(struct net_device *netdev,
 	return 0;
 }
 
-static u32 atl1e_get_tx_csum(struct net_device *netdev)
-{
-	return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
 static u32 atl1e_get_msglevel(struct net_device *netdev)
 {
 #ifdef DBG
@@ -145,10 +140,6 @@ static u32 atl1e_get_msglevel(struct net_device *netdev)
 #endif
 }
 
-static void atl1e_set_msglevel(struct net_device *netdev, u32 data)
-{
-}
-
 static int atl1e_get_regs_len(struct net_device *netdev)
 {
 	return AT_REGS_LEN * sizeof(u32);
@@ -387,18 +378,14 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
 	.get_wol                = atl1e_get_wol,
 	.set_wol                = atl1e_set_wol,
 	.get_msglevel           = atl1e_get_msglevel,
-	.set_msglevel           = atl1e_set_msglevel,
 	.nway_reset             = atl1e_nway_reset,
 	.get_link               = ethtool_op_get_link,
 	.get_eeprom_len         = atl1e_get_eeprom_len,
 	.get_eeprom             = atl1e_get_eeprom,
 	.set_eeprom             = atl1e_set_eeprom,
-	.get_tx_csum            = atl1e_get_tx_csum,
-	.get_sg                 = ethtool_op_get_sg,
+	.set_tx_csum            = ethtool_op_set_tx_hw_csum,
 	.set_sg                 = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
-	.get_tso                = ethtool_op_get_tso,
-#endif
+	.set_tso                = ethtool_op_set_tso,
 };
 
 void atl1e_set_ethtool_ops(struct net_device *netdev)
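The atl1e ethtool table now leans on the generic helpers instead of trivial driver-private callbacks. For reference, ethtool_op_set_tx_hw_csum() in net/core/ethtool.c of this era is roughly the following, and ethtool_op_set_sg()/ethtool_op_set_tso() do the same for NETIF_F_SG and NETIF_F_TSO:

	int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
	{
		if (data)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features &= ~NETIF_F_HW_CSUM;
		return 0;
	}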
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 955da733c2ad..08f8c0969e9b 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1433,14 +1433,12 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 
 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
 					RRS_PKT_SIZE_MASK) - 4; /* CRC */
-			skb = netdev_alloc_skb(netdev,
-					       packet_size + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 			if (skb == NULL) {
 				dev_warn(&pdev->dev, "%s: Memory squeeze,"
 					"deferring packet.\n", netdev->name);
 				goto skip_pkt;
 			}
-			skb_reserve(skb, NET_IP_ALIGN);
 			skb->dev = netdev;
 			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
 			skb_put(skb, packet_size);
@@ -1666,41 +1664,6 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
 			}
 			return 0;
 		}
-
-		if (offload_type & SKB_GSO_TCPV6) {
-			real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data)
-					+ ntohs(ipv6_hdr(skb)->payload_len));
-			if (real_len < skb->len)
-				pskb_trim(skb, real_len);
-
-			/* check payload == 0 byte ? */
-			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
-			if (unlikely(skb->len == hdr_len)) {
-				/* only xsum need */
-				dev_warn(&pdev->dev,
-					"IPV6 tso with zero data??\n");
-				goto check_sum;
-			} else {
-				tcp_hdr(skb)->check = ~csum_ipv6_magic(
-						&ipv6_hdr(skb)->saddr,
-						&ipv6_hdr(skb)->daddr,
-						0, IPPROTO_TCP, 0);
-				tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT;
-				hdr_len >>= 1;
-				tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) <<
-					TPD_V6_IPHLLO_SHIFT;
-				tpd->word3 |= ((hdr_len >> 3) &
-					TPD_V6_IPHLHI_MASK) <<
-					TPD_V6_IPHLHI_SHIFT;
-				tpd->word3 |= (tcp_hdrlen(skb) >> 2 &
-					TPD_TCPHDRLEN_MASK) <<
-					TPD_TCPHDRLEN_SHIFT;
-				tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
-					TPD_MSS_MASK) << TPD_MSS_SHIFT;
-					tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
-			}
-		}
-		return 0;
 	}
 
 check_sum:
@@ -1932,7 +1895,7 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
-	err = request_irq(adapter->pdev->irq, &atl1e_intr, flags,
+	err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
 			netdev->name, netdev);
 	if (err) {
 		dev_dbg(&pdev->dev,
@@ -2289,7 +2252,6 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	netdev->features |= NETIF_F_LLTX;
 	netdev->features |= NETIF_F_TSO;
-	netdev->features |= NETIF_F_TSO6;
 
 	return 0;
 }
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 00569dc1313c..b6cf3263127c 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1344,8 +1344,8 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 
 	/* link result is our setting */
 	if (!reconfig) {
-		if (adapter->link_speed != speed
-		    || adapter->link_duplex != duplex) {
+		if (adapter->link_speed != speed ||
+		    adapter->link_duplex != duplex) {
 			adapter->link_speed = speed;
 			adapter->link_duplex = duplex;
 			atl1_setup_mac_ctrl(adapter);
@@ -1864,21 +1864,14 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 
 		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
 
-		skb = netdev_alloc_skb(adapter->netdev,
-				       adapter->rx_buffer_len + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(adapter->netdev,
+						adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->netdev->stats.rx_dropped++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->alloced = 1;
 		buffer_info->skb = skb;
 		buffer_info->length = (u16) adapter->rx_buffer_len;
@@ -2094,8 +2087,8 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
 	}
 	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
 
-	if (netif_queue_stopped(adapter->netdev)
-	    && netif_carrier_ok(adapter->netdev))
+	if (netif_queue_stopped(adapter->netdev) &&
+	    netif_carrier_ok(adapter->netdev))
 		netif_wake_queue(adapter->netdev);
 }
 
@@ -2596,7 +2589,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
 		irq_flags |= IRQF_SHARED;
 	}
 
-	err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
+	err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
 			netdev->name, netdev);
 	if (unlikely(err))
 		goto err_up;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ab688862093f..c0451d75cdcf 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -409,7 +409,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
 			int rx_size = (int)(rxd->status.pkt_size - 4);
 			/* alloc new buffer */
-			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, rx_size);
 			if (NULL == skb) {
 				printk(KERN_WARNING
 					"%s: Mem squeeze, deferring packet.\n",
@@ -421,7 +421,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 				netdev->stats.rx_dropped++;
 				break;
 			}
-			skb_reserve(skb, NET_IP_ALIGN);
 			skb->dev = netdev;
 			memcpy(skb->data, rxd->packet, rx_size);
 			skb_put(skb, rx_size);
@@ -652,7 +651,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
 	if (adapter->have_msi)
 		flags &= ~IRQF_SHARED;
 
-	return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
+	return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
 		netdev);
 }
 
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 9043294fe617..2f8261c9614a 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -437,7 +437,7 @@ static int net_open(struct net_device *dev)
 	/* The interrupt line is turned off (tri-stated) when the device isn't in
 	   use.  That's especially important for "attached" interfaces where the
 	   port or interrupt may be shared. */
-	ret = request_irq(dev->irq, &atp_interrupt, 0, dev->name, dev);
+	ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev);
 	if (ret)
 		return ret;
 
@@ -673,8 +673,8 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
 				netif_wake_queue(dev);	/* Inform upper layers. */
 			}
 			num_tx_since_rx++;
-		} else if (num_tx_since_rx > 8
-				   && time_after(jiffies, dev->last_rx + HZ)) {
+		} else if (num_tx_since_rx > 8 &&
+			   time_after(jiffies, dev->last_rx + HZ)) {
 			if (net_debug > 2)
 				printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
 					   "%ld jiffies status %02x  CMR1 %02x.\n", dev->name,
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3f4b4300f533..6bac04603a88 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -881,7 +881,7 @@ static int au1000_open(struct net_device *dev)
 	if (au1000_debug > 4)
 		printk("%s: open: dev=%p\n", dev->name, dev);
 
-	if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
+	if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
 					dev->name, dev))) {
 		printk(KERN_ERR "%s: unable to get IRQ %d\n",
 				dev->name, dev->irq);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ba29dc319b34..1f6c5486d715 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -320,16 +320,13 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		if (len < copybreak) {
 			struct sk_buff *nskb;
 
-			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+			nskb = netdev_alloc_skb_ip_align(dev, len);
 			if (!nskb) {
 				/* forget packet, just rearm desc */
 				priv->stats.rx_dropped++;
 				continue;
 			}
 
-			/* since we're copying the data, we can align
-			 * them properly */
-			skb_reserve(nskb, NET_IP_ALIGN);
 			dma_sync_single_for_cpu(kdev, desc->address,
 						len, DMA_FROM_DEVICE);
 			memcpy(nskb->data, skb->data, len);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 3b79a225628a..9e56014d27ed 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -32,23 +32,34 @@
 
 #include "be_hw.h"
 
-#define DRV_VER			"2.101.205"
+#define DRV_VER			"2.101.346u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
+#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
 #define OC_NAME			"Emulex OneConnect 10Gbps NIC"
+#define OC_NAME1		"Emulex OneConnect 10Gbps NIC (be3)"
 #define DRV_DESC		BE_NAME "Driver"
 
 #define BE_VENDOR_ID 		0x19a2
 #define BE_DEVICE_ID1		0x211
+#define BE_DEVICE_ID2		0x221
 #define OC_DEVICE_ID1		0x700
 #define OC_DEVICE_ID2		0x701
+#define OC_DEVICE_ID3		0x710
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
-	if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
+	switch (pdev->device) {
+	case OC_DEVICE_ID1:
+	case OC_DEVICE_ID2:
 		return OC_NAME;
-	else
+	case OC_DEVICE_ID3:
+		return OC_NAME1;
+	case BE_DEVICE_ID2:
+		return BE3_NAME;
+	default:
 		return BE_NAME;
+	}
 }
 
 /* Number of bytes of an RX frame that are copied to skb->data */
@@ -159,7 +170,7 @@ struct be_drvr_stats {
 	u32 cache_barrier[16];
 
 	u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
-	u32 be_polls;		/* number of times NAPI called poll function */
+	u32 be_rx_polls;	/* number of times NAPI called poll function */
 	u32 be_rx_events;	/* number of ucast rx completion events  */
 	u32 be_rx_compl;	/* number of rx completion entries processed */
 	ulong be_rx_jiffies;
@@ -181,7 +192,6 @@ struct be_drvr_stats {
 
 struct be_stats_obj {
 	struct be_drvr_stats drvr_stats;
-	struct net_device_stats net_stats;
 	struct be_dma_mem cmd;
 };
 
@@ -244,6 +254,7 @@ struct be_adapter {
 	struct vlan_group *vlan_grp;
 	u16 num_vlans;
 	u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
+	struct be_dma_mem mc_cmd_mem;
 
 	struct be_stats_obj stats;
 	/* Work queue used to perform periodic tasks like getting statistics */
@@ -258,9 +269,12 @@ struct be_adapter {
 	bool link_up;
 	u32 port_num;
 	bool promiscuous;
+	bool wol;
 	u32 cap;
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
+	int link_speed;
+	u8 port_type;
 };
 
 extern const struct ethtool_ops be_ethtool_ops;
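In the be_cmds.c hunks below, wrb_from_mccq() stops BUG()ing on a full MCC queue and returns NULL instead, and every caller is converted to the same bail-out pattern (shown here condensed from those hunks):

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* ... build the request and fire it via be_mcc_notify[_wait]() ... */
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;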
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 28a0eda92680..1b68bd98dc0c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -71,8 +71,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 				CQE_STATUS_EXTD_MASK;
 		dev_warn(&adapter->pdev->dev,
-			"Error in cmd completion: status(compl/extd)=%d/%d\n",
-			compl_status, extd_status);
+		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
+			compl->tag0, compl_status, extd_status);
 	}
 	return compl_status;
 }
@@ -277,7 +277,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
 
 /* Don't touch the hdr after it's prepared */
 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
-				bool embedded, u8 sge_cnt)
+				bool embedded, u8 sge_cnt, u32 opcode)
 {
 	if (embedded)
 		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
@@ -285,6 +285,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
 				MCC_WRB_SGE_CNT_SHIFT;
 	wrb->payload_length = payload_len;
+	wrb->tag0 = opcode;
 	be_dws_cpu_to_le(wrb, 20);
 }
 
@@ -349,7 +350,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	struct be_mcc_wrb *wrb;
 
-	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+	if (atomic_read(&mccq->used) >= mccq->len) {
+		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
+		return NULL;
+	}
+
 	wrb = queue_head_node(mccq);
 	queue_head_inc(mccq);
 	atomic_inc(&mccq->used);
@@ -357,6 +362,57 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
 	return wrb;
 }
 
+/* Tell fw we're about to start firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_init(struct be_adapter *adapter)
+{
+	u8 *wrb;
+	int status;
+
+	spin_lock(&adapter->mbox_lock);
+
+	wrb = (u8 *)wrb_from_mbox(adapter);
+	*wrb++ = 0xFF;
+	*wrb++ = 0x12;
+	*wrb++ = 0x34;
+	*wrb++ = 0xFF;
+	*wrb++ = 0xFF;
+	*wrb++ = 0x56;
+	*wrb++ = 0x78;
+	*wrb = 0xFF;
+
+	status = be_mbox_notify_wait(adapter);
+
+	spin_unlock(&adapter->mbox_lock);
+	return status;
+}
+
+/* Tell fw we're done with firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_clean(struct be_adapter *adapter)
+{
+	u8 *wrb;
+	int status;
+
+	spin_lock(&adapter->mbox_lock);
+
+	wrb = (u8 *)wrb_from_mbox(adapter);
+	*wrb++ = 0xFF;
+	*wrb++ = 0xAA;
+	*wrb++ = 0xBB;
+	*wrb++ = 0xFF;
+	*wrb++ = 0xFF;
+	*wrb++ = 0xCC;
+	*wrb++ = 0xDD;
+	*wrb = 0xFF;
+
+	status = be_mbox_notify_wait(adapter);
+
+	spin_unlock(&adapter->mbox_lock);
+	return status;
+}
 int be_cmd_eq_create(struct be_adapter *adapter,
 		struct be_queue_info *eq, int eq_delay)
 {
@@ -370,7 +426,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_EQ_CREATE, sizeof(*req));
@@ -414,7 +470,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_MAC_QUERY);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
@@ -448,9 +505,14 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_PMAC_ADD);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
@@ -464,6 +526,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 		*pmac_id = le32_to_cpu(resp->pmac_id);
 	}
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -478,9 +541,14 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_PMAC_DEL);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
@@ -490,8 +558,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
-
 	return status;
 }
 
@@ -512,7 +580,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 	req = embedded_payload(wrb);
 	ctxt = &req->context;
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_CQ_CREATE);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_CQ_CREATE, sizeof(*req));
@@ -569,7 +638,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 	req = embedded_payload(wrb);
 	ctxt = &req->context;
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_MCC_CREATE);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 			OPCODE_COMMON_MCC_CREATE, sizeof(*req));
@@ -613,7 +683,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 	req = embedded_payload(wrb);
 	ctxt = &req->context;
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_ETH_TX_CREATE);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
 		sizeof(*req));
@@ -660,7 +731,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_ETH_RX_CREATE);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
 		sizeof(*req));
@@ -701,8 +773,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-
 	switch (queue_type) {
 	case QTYPE_EQ:
 		subsys = CMD_SUBSYSTEM_COMMON;
@@ -727,6 +797,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 	default:
 		BUG();
 	}
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
+
 	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
 	req->id = cpu_to_le16(q->id);
 
@@ -752,7 +825,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_INTERFACE_CREATE);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
@@ -787,7 +861,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
@@ -810,15 +885,20 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_stats *req;
 	struct be_sge *sge;
+	int status = 0;
 
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = nonemb_cmd->va;
 	sge = nonembedded_sgl(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
-	wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
+	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+			OPCODE_ETH_GET_STATISTICS);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
 		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
@@ -828,13 +908,14 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 
 	be_mcc_notify(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
-	return 0;
+	return status;
 }
 
 /* Uses synchronous mcc */
 int be_cmd_link_status_query(struct be_adapter *adapter,
-			bool *link_up)
+			bool *link_up, u8 *mac_speed, u16 *link_speed)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_link_status *req;
@@ -843,11 +924,16 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
 	*link_up = false;
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
@@ -855,10 +941,14 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
-		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
 			*link_up = true;
+			*link_speed = le16_to_cpu(resp->link_speed);
+			*mac_speed = resp->mac_speed;
+		}
 	}
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -875,7 +965,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_GET_FW_VERSION);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
@@ -897,13 +988,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_modify_eq_delay *req;
+	int status = 0;
 
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_MODIFY_EQ_DELAY);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
@@ -915,8 +1012,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
 
 	be_mcc_notify(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
-	return 0;
+	return status;
 }
 
 /* Uses synchronous mcc */
@@ -930,9 +1028,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_NTWK_VLAN_CONFIG);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
@@ -948,6 +1051,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -964,9 +1068,13 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
 		OPCODE_ETH_PROMISCUOUS, sizeof(*req));
@@ -978,6 +1086,7 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -987,24 +1096,35 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
  * (mc == NULL) => multicast promiscuous
  */
 int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
-		struct dev_mc_list *mc_list, u32 mc_count)
+		struct dev_mc_list *mc_list, u32 mc_count,
+		struct be_dma_mem *mem)
 {
-#define BE_MAX_MC		32 /* set mcast promisc if > 32 */
 	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_mcast_mac_config *req;
+	struct be_cmd_req_mcast_mac_config *req = mem->va;
+	struct be_sge *sge;
+	int status;
 
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
-	req = embedded_payload(wrb);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	sge = nonembedded_sgl(wrb);
+	memset(req, 0, sizeof(*req));
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+			OPCODE_COMMON_NTWK_MULTICAST_SET);
+	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
+	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(mem->size);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
 
 	req->interface_id = if_id;
-	if (mc_list && mc_count <= BE_MAX_MC) {
+	if (mc_list) {
 		int i;
 		struct dev_mc_list *mc;
 
@@ -1016,11 +1136,11 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
 		req->promiscuous = 1;
 	}
 
-	be_mcc_notify_wait(adapter);
+	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
-
-	return 0;
+	return status;
 }
 
 /* Uses synchronous mcc */
@@ -1033,9 +1153,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_SET_FLOW_CONTROL);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
@@ -1045,6 +1170,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -1059,9 +1185,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_GET_FLOW_CONTROL);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
@@ -1074,6 +1205,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
 	}
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -1090,7 +1222,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
@@ -1118,7 +1251,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_FUNCTION_RESET);
 
 	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
@@ -1129,6 +1263,113 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 	return status;
 }
 
+/* Uses sync mcc */
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
+			u8 bcn, u8 sts, u8 state)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_enable_disable_beacon *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_ENABLE_DISABLE_BEACON);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
+
+	req->port_num = port_num;
+	req->beacon_state = state;
+	req->beacon_duration = bcn;
+	req->status_duration = sts;
+
+	status = be_mcc_notify_wait(adapter);
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_beacon_state *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_GET_BEACON_STATE);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
+
+	req->port_num = port_num;
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_beacon_state *resp =
+						embedded_payload(wrb);
+		*state = resp->beacon_state;
+	}
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
+				u8 *connector)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_port_type *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
+			OPCODE_COMMON_READ_TRANSRECV_DATA);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
+
+	req->port = cpu_to_le32(port);
+	req->page_num = cpu_to_le32(TR_PAGE_A0);
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
+		*connector = resp->data.connector;
+	}
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
@@ -1140,9 +1381,15 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = cmd->va;
 	sge = nonembedded_sgl(wrb);
 
-	be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
+	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
+			OPCODE_COMMON_WRITE_FLASHROM);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
@@ -1156,6 +1403,171 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_write_flashrom *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
+			OPCODE_COMMON_READ_FLASHROM);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
+
+	req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
+	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
+	req->params.offset = 0x3FFFC;
+	req->params.data_buf_size = 0x4;
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status)
+		memcpy(flashed_crc, req->params.data_buf, 4);
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+				struct be_dma_mem *nonemb_cmd)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_acpi_wol_magic_config *req;
+	struct be_sge *sge;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = nonemb_cmd->va;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
+	memcpy(req->magic_mac, mac, ETH_ALEN);
+
+	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(nonemb_cmd->size);
+
+	status = be_mcc_notify_wait(adapter);
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_loopback_test *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+				OPCODE_LOWLEVEL_LOOPBACK_TEST);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+
+	req->pattern = cpu_to_le64(pattern);
+	req->src_port = cpu_to_le32(port_num);
+	req->dest_port = cpu_to_le32(port_num);
+	req->pkt_size = cpu_to_le32(pkt_size);
+	req->num_pkts = cpu_to_le32(num_pkts);
+	req->loopback_type = cpu_to_le32(loopback_type);
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
+		status = le32_to_cpu(resp->status);
+	}
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+				u32 byte_cnt, struct be_dma_mem *cmd)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_ddrdma_test *req;
+	struct be_sge *sge;
+	int status;
+	int i, j = 0;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = cmd->va;
+	sge = nonembedded_sgl(wrb);
+	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
+				OPCODE_LOWLEVEL_HOST_DDR_DMA);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
+
+	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(cmd->size);
+
+	req->pattern = cpu_to_le64(pattern);
+	req->byte_count = cpu_to_le32(byte_cnt);
+	for (i = 0; i < byte_cnt; i++) {
+		req->snd_buff[i] = (u8)(pattern >> (j*8));
+		j++;
+		if (j > 7)
+			j = 0;
+	}
+
+	status = be_mcc_notify_wait(adapter);
+
+	if (!status) {
+		struct be_cmd_resp_ddrdma_test *resp;
+		resp = cmd->va;
+		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
+				resp->snd_err) {
+			status = -1;
+		}
+	}
+
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
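/*
 * Sketch of the non-embedded variant used above by the multicast, WOL
 * magic-packet and DDR-DMA commands: the request lives in a caller-supplied
 * be_dma_mem (assumed to have been allocated with pci_alloc_consistent())
 * and the wrb carries a single scatter-gather entry pointing at it. The
 * wrapper name and the opcode chosen here are illustrative only.
 */
static int be_cmd_nonemb_example(struct be_adapter *adapter,
			struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, mem->size, false /* non-embedded */, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, mem->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}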
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e5f9676cf1bc..e7323be507d0 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -112,12 +112,14 @@ struct be_mcc_mailbox {
 
 #define CMD_SUBSYSTEM_COMMON	0x1
 #define CMD_SUBSYSTEM_ETH 	0x3
+#define CMD_SUBSYSTEM_LOWLEVEL  0xb
 
 #define OPCODE_COMMON_NTWK_MAC_QUERY			1
 #define OPCODE_COMMON_NTWK_MAC_SET			2
 #define OPCODE_COMMON_NTWK_MULTICAST_SET		3
 #define OPCODE_COMMON_NTWK_VLAN_CONFIG  		4
 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY		5
+#define OPCODE_COMMON_READ_FLASHROM			6
 #define OPCODE_COMMON_WRITE_FLASHROM			7
 #define OPCODE_COMMON_CQ_CREATE				12
 #define OPCODE_COMMON_EQ_CREATE				13
@@ -138,6 +140,9 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_NTWK_PMAC_ADD			59
 #define OPCODE_COMMON_NTWK_PMAC_DEL			60
 #define OPCODE_COMMON_FUNCTION_RESET			61
+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON		69
+#define OPCODE_COMMON_GET_BEACON_STATE			70
+#define OPCODE_COMMON_READ_TRANSRECV_DATA		73
 
 #define OPCODE_ETH_ACPI_CONFIG				2
 #define OPCODE_ETH_PROMISCUOUS				3
@@ -146,6 +151,10 @@ struct be_mcc_mailbox {
 #define OPCODE_ETH_RX_CREATE            		8
 #define OPCODE_ETH_TX_DESTROY           		9
 #define OPCODE_ETH_RX_DESTROY           		10
+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG		12
+
+#define OPCODE_LOWLEVEL_HOST_DDR_DMA                    17
+#define OPCODE_LOWLEVEL_LOOPBACK_TEST                   18
 
 struct be_cmd_req_hdr {
 	u8 opcode;		/* dword 0 */
@@ -587,6 +596,8 @@ struct be_cmd_req_promiscuous_config {
 	u16 rsvd0;
 } __packed;
 
+/******************** Multicast MAC Config *******************/
+#define BE_MAX_MC		64 /* set mcast promisc if > 64 */
 struct macaddr {
 	u8 byte[ETH_ALEN];
 };
@@ -596,7 +607,7 @@ struct be_cmd_req_mcast_mac_config {
 	u16 num_mac;
 	u8 promiscuous;
 	u8 interface_id;
-	struct macaddr mac[32];
+	struct macaddr mac[BE_MAX_MC];
 } __packed;
 
 static inline struct be_hw_stats *
@@ -633,9 +644,47 @@ struct be_cmd_resp_link_status {
 	u8 mac_fault;
 	u8 mgmt_mac_duplex;
 	u8 mgmt_mac_speed;
-	u16 rsvd0;
+	u16 link_speed;
+	u32 rsvd0;
 } __packed;
 
+/******************** Port Identification ***************************/
+/*    Identifies the type of port attached to NIC     */
+struct be_cmd_req_port_type {
+	struct be_cmd_req_hdr hdr;
+	u32 page_num;
+	u32 port;
+};
+
+enum {
+	TR_PAGE_A0 = 0xa0,
+	TR_PAGE_A2 = 0xa2
+};
+
+struct be_cmd_resp_port_type {
+	struct be_cmd_resp_hdr hdr;
+	u32 page_num;
+	u32 port;
+	struct data {
+		u8 identifier;
+		u8 identifier_ext;
+		u8 connector;
+		u8 transceiver[8];
+		u8 rsvd0[3];
+		u8 length_km;
+		u8 length_hm;
+		u8 length_om1;
+		u8 length_om2;
+		u8 length_cu;
+		u8 length_cu_m;
+		u8 vendor_name[16];
+		u8 rsvd;
+		u8 vendor_oui[3];
+		u8 vendor_pn[16];
+		u8 vendor_rev[4];
+	} data;
+};
+
 /******************** Get FW Version *******************/
 struct be_cmd_req_get_fw_version {
 	struct be_cmd_req_hdr hdr;
@@ -699,6 +748,37 @@ struct be_cmd_resp_query_fw_cfg {
 	u32 rsvd[26];
 };
 
+/******************** Port Beacon ***************************/
+
+#define BEACON_STATE_ENABLED		0x1
+#define BEACON_STATE_DISABLED		0x0
+
+struct be_cmd_req_enable_disable_beacon {
+	struct be_cmd_req_hdr hdr;
+	u8  port_num;
+	u8  beacon_state;
+	u8  beacon_duration;
+	u8  status_duration;
+} __packed;
+
+struct be_cmd_resp_enable_disable_beacon {
+	struct be_cmd_resp_hdr resp_hdr;
+	u32 rsvd0;
+} __packed;
+
+struct be_cmd_req_get_beacon_state {
+	struct be_cmd_req_hdr hdr;
+	u8  port_num;
+	u8  rsvd0;
+	u16 rsvd1;
+} __packed;
+
+struct be_cmd_resp_get_beacon_state {
+	struct be_cmd_resp_hdr resp_hdr;
+	u8 beacon_state;
+	u8 rsvd0[3];
+} __packed;
+
 /****************** Firmware Flash ******************/
 struct flashrom_params {
 	u32 op_code;
@@ -713,6 +793,53 @@ struct be_cmd_write_flashrom {
 	struct flashrom_params params;
 };
 
+/************************ WOL *******************************/
+struct be_cmd_req_acpi_wol_magic_config {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd0[145];
+	u8 magic_mac[6];
+	u8 rsvd2[2];
+} __packed;
+
+/********************** LoopBack test *********************/
+struct be_cmd_req_loopback_test {
+	struct be_cmd_req_hdr hdr;
+	u32 loopback_type;
+	u32 num_pkts;
+	u64 pattern;
+	u32 src_port;
+	u32 dest_port;
+	u32 pkt_size;
+};
+
+struct be_cmd_resp_loopback_test {
+	struct be_cmd_resp_hdr resp_hdr;
+	u32    status;
+	u32    num_txfer;
+	u32    num_rx;
+	u32    miscomp_off;
+	u32    ticks_compl;
+};
+
+/********************** DDR DMA test *********************/
+struct be_cmd_req_ddrdma_test {
+	struct be_cmd_req_hdr hdr;
+	u64 pattern;
+	u32 byte_count;
+	u32 rsvd0;
+	u8  snd_buff[4096];
+	u8  rsvd1[4096];
+};
+
+struct be_cmd_resp_ddrdma_test {
+	struct be_cmd_resp_hdr hdr;
+	u64 pattern;
+	u32 byte_cnt;
+	u32 snd_err;
+	u8  rsvd0[4096];
+	u8  rcv_buff[4096];
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -743,7 +870,7 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 			int type);
 extern int be_cmd_link_status_query(struct be_adapter *adapter,
-			bool *link_up);
+			bool *link_up, u8 *mac_speed, u16 *link_speed);
 extern int be_cmd_reset(struct be_adapter *adapter);
 extern int be_cmd_get_stats(struct be_adapter *adapter,
 			struct be_dma_mem *nonemb_cmd);
@@ -756,7 +883,8 @@ extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
 extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
 			u8 port_num, bool en);
 extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
-			struct dev_mc_list *mc_list, u32 mc_count);
+			struct dev_mc_list *mc_list, u32 mc_count,
+			struct be_dma_mem *mem);
 extern int be_cmd_set_flow_control(struct be_adapter *adapter,
 			u32 tx_fc, u32 rx_fc);
 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
@@ -765,6 +893,22 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
 			u32 *port_num, u32 *cap);
 extern int be_cmd_reset_function(struct be_adapter *adapter);
 extern int be_process_mcc(struct be_adapter *adapter);
+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
+			u8 port_num, u8 beacon, u8 status, u8 state);
+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
+			u8 port_num, u32 *state);
+extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
+					u8 *connector);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
 			struct be_dma_mem *cmd, u32 flash_oper,
 			u32 flash_opcode, u32 buf_size);
+extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc);
+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+				struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_fw_init(struct be_adapter *adapter);
+extern int be_cmd_fw_clean(struct be_adapter *adapter);
+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+				u32 loopback_type, u32 pkt_size,
+				u32 num_pkts, u64 pattern);
+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+			u32 byte_cnt, struct be_dma_mem *cmd);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index f0fd95b43c07..298b92cbd689 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -55,7 +55,7 @@ static const struct be_ethtool_stat et_stats[] = {
 	{DRVSTAT_INFO(be_tx_stops)},
 	{DRVSTAT_INFO(be_fwd_reqs)},
 	{DRVSTAT_INFO(be_tx_wrbs)},
-	{DRVSTAT_INFO(be_polls)},
+	{DRVSTAT_INFO(be_rx_polls)},
 	{DRVSTAT_INFO(be_tx_events)},
 	{DRVSTAT_INFO(be_rx_events)},
 	{DRVSTAT_INFO(be_tx_compl)},
@@ -107,6 +107,18 @@ static const struct be_ethtool_stat et_stats[] = {
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
+static const char et_self_tests[][ETH_GSTRING_LEN] = {
+	"MAC Loopback test",
+	"PHY Loopback test",
+	"External Loopback test",
+	"DDR DMA test"
+};
+
+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
+#define BE_MAC_LOOPBACK 0x0
+#define BE_PHY_LOOPBACK 0x1
+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+
 static void
 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 {
@@ -234,7 +246,7 @@ be_get_ethtool_stats(struct net_device *netdev,
 	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
 	struct be_port_rxf_stats *port_stats =
 			&rxf_stats->port[adapter->port_num];
-	struct net_device_stats *net_stats = &adapter->stats.net_stats;
+	struct net_device_stats *net_stats = &netdev->stats;
 	struct be_erx_stats *erx_stats = &hw_stats->erx;
 	void *p = NULL;
 	int i;
@@ -278,19 +290,78 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
 			data += ETH_GSTRING_LEN;
 		}
 		break;
+	case ETH_SS_TEST:
+		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
+			memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
 	}
 }
 
-static int be_get_stats_count(struct net_device *netdev)
+static int be_get_sset_count(struct net_device *netdev, int stringset)
 {
-	return ETHTOOL_STATS_NUM;
+	switch (stringset) {
+	case ETH_SS_TEST:
+		return ETHTOOL_TESTS_NUM;
+	case ETH_SS_STATS:
+		return ETHTOOL_STATS_NUM;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
-	ecmd->speed = SPEED_10000;
+	struct be_adapter *adapter = netdev_priv(netdev);
+	u8 mac_speed = 0, connector = 0;
+	u16 link_speed = 0;
+	bool link_up = false;
+	int status;
+
+	if (adapter->link_speed < 0) {
+		status = be_cmd_link_status_query(adapter, &link_up,
+						&mac_speed, &link_speed);
+
+		/* link_speed is in units of 10 Mbps */
+		if (link_speed) {
+			ecmd->speed = link_speed*10;
+		} else {
+			switch (mac_speed) {
+			case PHY_LINK_SPEED_1GBPS:
+				ecmd->speed = SPEED_1000;
+				break;
+			case PHY_LINK_SPEED_10GBPS:
+				ecmd->speed = SPEED_10000;
+				break;
+			}
+		}
+
+		status = be_cmd_read_port_type(adapter, adapter->port_num,
+						&connector);
+		switch (connector) {
+		case 7:
+			ecmd->port = PORT_FIBRE;
+			break;
+		default:
+			ecmd->port = PORT_TP;
+			break;
+		}
+
+		/* Save for future use */
+		adapter->link_speed = ecmd->speed;
+		adapter->port_type = ecmd->port;
+	} else {
+		ecmd->speed = adapter->link_speed;
+		ecmd->port = adapter->port_type;
+	}
+
 	ecmd->duplex = DUPLEX_FULL;
 	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+	ecmd->phy_address = adapter->port_num;
+	ecmd->transceiver = XCVR_INTERNAL;
+
 	return 0;
 }
 
@@ -335,6 +406,123 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
 }
 
 static int
+be_phys_id(struct net_device *netdev, u32 data)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status;
+	u32 cur;
+
+	be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+
+	if (cur == BEACON_STATE_ENABLED)
+		return 0;
+
+	if (data < 2)
+		data = 2;
+
+	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+			BEACON_STATE_ENABLED);
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(data*HZ);
+
+	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+			BEACON_STATE_DISABLED);
+
+	return status;
+}
+
+static void
+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_MAGIC;
+	if (adapter->wol)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+	return;
+}
+
+static int
+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol = true;
+	else
+		adapter->wol = false;
+
+	return 0;
+}
+
+static int
+be_test_ddr_dma(struct be_adapter *adapter)
+{
+	int ret, i;
+	struct be_dma_mem ddrdma_cmd;
+	u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+
+	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
+	ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
+					&ddrdma_cmd.dma);
+	if (!ddrdma_cmd.va) {
+		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
+					4096, &ddrdma_cmd);
+		if (ret != 0)
+			goto err;
+	}
+
+err:
+	pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
+			ddrdma_cmd.va, ddrdma_cmd.dma);
+	return ret;
+}
+
+static void
+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+
+	if (test->flags & ETH_TEST_FL_OFFLINE) {
+		data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
+						BE_MAC_LOOPBACK, 1500,
+						2, 0xabc);
+		if (data[0] != 0)
+			test->flags |= ETH_TEST_FL_FAILED;
+
+		data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
+						BE_PHY_LOOPBACK, 1500,
+						2, 0xabc);
+		if (data[1] != 0)
+			test->flags |= ETH_TEST_FL_FAILED;
+
+		data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
+						BE_ONE_PORT_EXT_LOOPBACK,
+						1500, 2, 0xabc);
+		if (data[2] != 0)
+			test->flags |= ETH_TEST_FL_FAILED;
+
+		data[3] = be_test_ddr_dma(adapter);
+		if (data[3] != 0)
+			test->flags |= ETH_TEST_FL_FAILED;
+	}
+
+}
+
+static int
 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
@@ -351,6 +539,8 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 const struct ethtool_ops be_ethtool_ops = {
 	.get_settings = be_get_settings,
 	.get_drvinfo = be_get_drvinfo,
+	.get_wol = be_get_wol,
+	.set_wol = be_set_wol,
 	.get_link = ethtool_op_get_link,
 	.get_coalesce = be_get_coalesce,
 	.set_coalesce = be_set_coalesce,
@@ -366,7 +556,9 @@ const struct ethtool_ops be_ethtool_ops = {
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = ethtool_op_set_tso,
 	.get_strings = be_get_stat_strings,
-	.get_stats_count = be_get_stats_count,
+	.phys_id = be_phys_id,
+	.get_sset_count = be_get_sset_count,
 	.get_ethtool_stats = be_get_ethtool_stats,
 	.flash_device = be_do_flash,
+	.self_test = be_self_test,
 };
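/*
 * Context sketch (hypothetical driver, not from this patch): in this
 * ethtool core, .get_stats_count is superseded by .get_sset_count, which
 * is queried once per string set. ETH_SS_STATS backs `ethtool -S`,
 * ETH_SS_TEST backs `ethtool -t`, and .phys_id backs `ethtool -p`;
 * returning -EINVAL for unknown sets is the conventional fallback, as in
 * be_get_sset_count() above. The counts below are placeholders.
 */
static int example_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_STATS:
		return EXAMPLE_NUM_STATS;	/* hypothetical count */
	case ETH_SS_TEST:
		return EXAMPLE_NUM_TESTS;	/* hypothetical count */
	default:
		return -EINVAL;
	}
}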
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a3394b4aa14a..e2b3beffd49d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -52,6 +52,10 @@
  */
 #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK	(1 << 29) /* bit 29 */
 
+/********* Power management (WOL) **********/
+#define PCICFG_PM_CONTROL_OFFSET		0x44
+#define PCICFG_PM_CONTROL_MASK			0x108	/* bits 3 & 8 */
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 			0xC18
 #define CEV_ISR_SIZE				4
@@ -225,6 +229,7 @@ struct be_eth_rx_compl {
 #define NUM_FLASHDIR_ENTRIES		32
 
 #define FLASHROM_TYPE_ISCSI_ACTIVE	0
+#define FLASHROM_TYPE_REDBOOT		1
 #define FLASHROM_TYPE_BIOS		2
 #define FLASHROM_TYPE_PXE_BIOS		3
 #define FLASHROM_TYPE_FCOE_BIOS		8
@@ -234,9 +239,11 @@ struct be_eth_rx_compl {
 
 #define FLASHROM_OPER_FLASH		1
 #define FLASHROM_OPER_SAVE		2
+#define FLASHROM_OPER_REPORT		4
 
 #define FLASH_IMAGE_MAX_SIZE            (1310720) /* Max firmware image size */
 #define FLASH_BIOS_IMAGE_MAX_SIZE       (262144)  /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE    (262144)  /* Max redboot image sz */
 
 /* Offsets for components on Flash. */
 #define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
@@ -246,6 +253,8 @@ struct be_eth_rx_compl {
 #define FLASH_iSCSI_BIOS_START          (7340032)
 #define FLASH_PXE_BIOS_START            (7864320)
 #define FLASH_FCoE_BIOS_START           (524288)
+#define FLASH_REDBOOT_START		(32768)
+#define FLASH_REDBOOT_ISM_START		(0)
 
 struct controller_id {
 	u32 vendor;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 876b357101fa..957a0f7f2764 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -31,8 +31,10 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -123,6 +125,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 	struct sockaddr *addr = p;
 	int status = 0;
 
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
 	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
 	if (status)
 		return status;
@@ -141,7 +146,7 @@ void netdev_stats_update(struct be_adapter *adapter)
 	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
 	struct be_port_rxf_stats *port_stats =
 			&rxf_stats->port[adapter->port_num];
-	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
+	struct net_device_stats *dev_stats = &adapter->netdev->stats;
 	struct be_erx_stats *erx_stats = &hw_stats->erx;
 
 	dev_stats->rx_packets = port_stats->rx_total_frames;
@@ -168,7 +173,8 @@ void netdev_stats_update(struct be_adapter *adapter)
 		port_stats->rx_udp_checksum_errs;
 
 	/*  no space in linux buffers: best possible approximation */
-	dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
+	dev_stats->rx_dropped =
+		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
 
 	/* detailed rx errors */
 	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
@@ -214,6 +220,7 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
 
 	/* If link came up or went down */
 	if (adapter->link_up != link_up) {
+		adapter->link_speed = -1;
 		if (link_up) {
 			netif_start_queue(netdev);
 			netif_carrier_on(netdev);
@@ -269,9 +276,7 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
 
 static struct net_device_stats *be_get_stats(struct net_device *dev)
 {
-	struct be_adapter *adapter = netdev_priv(dev);
-
-	return &adapter->stats.net_stats;
+	return &dev->stats;
 }
 
 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
@@ -389,15 +394,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	atomic_add(wrb_cnt, &txq->used);
 	queue_head_inc(txq);
 
-	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&pdev->dev, "TX DMA mapping failed\n");
-		return 0;
-	}
-
 	if (skb->len > skb->data_len) {
 		int len = skb->len - skb->data_len;
+		busaddr = pci_map_single(pdev, skb->data, len,
+					 PCI_DMA_TODEVICE);
 		wrb = queue_head_node(txq);
-		busaddr = skb_shinfo(skb)->dma_head;
 		wrb_fill(wrb, busaddr, len);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
 		queue_head_inc(txq);
@@ -407,8 +408,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		struct skb_frag_struct *frag =
 			&skb_shinfo(skb)->frags[i];
-
-		busaddr = skb_shinfo(skb)->dma_maps[i];
+		busaddr = pci_map_page(pdev, frag->page,
+				       frag->page_offset,
+				       frag->size, PCI_DMA_TODEVICE);
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, frag->size);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -562,13 +564,15 @@ static void be_set_multicast_list(struct net_device *netdev)
 		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
 	}
 
-	if (netdev->flags & IFF_ALLMULTI) {
-		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
+	/* Enable multicast promisc if num configured exceeds what we support */
+	if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
+		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
+				&adapter->mc_cmd_mem);
 		goto done;
 	}
 
 	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
-		netdev->mc_count);
+		netdev->mc_count, &adapter->mc_cmd_mem);
 done:
 	return;
 }
@@ -758,7 +762,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	if ((adapter->cap == 0x400) && !vtm)
 		vlanf = 0;
 
-	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
 	if (!skb) {
 		if (net_ratelimit())
 			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -766,8 +770,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 		return;
 	}
 
-	skb_reserve(skb, NET_IP_ALIGN);
-
 	skb_fill_rx_data(adapter, skb, rxcp);
 
 	if (do_pkt_csum(rxcp, adapter->rx_csum))
@@ -981,23 +983,41 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 {
 	struct be_queue_info *txq = &adapter->tx_obj.q;
+	struct be_eth_wrb *wrb;
 	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
 	struct sk_buff *sent_skb;
+	u64 busaddr;
 	u16 cur_index, num_wrbs = 0;
 
 	cur_index = txq->tail;
 	sent_skb = sent_skbs[cur_index];
 	BUG_ON(!sent_skb);
 	sent_skbs[cur_index] = NULL;
+	wrb = queue_tail_node(txq);
+	be_dws_le_to_cpu(wrb, sizeof(*wrb));
+	busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
+	if (busaddr != 0) {
+		pci_unmap_single(adapter->pdev, busaddr,
+				 wrb->frag_len, PCI_DMA_TODEVICE);
+	}
+	num_wrbs++;
+	queue_tail_inc(txq);
 
-	do {
+	while (cur_index != last_index) {
 		cur_index = txq->tail;
+		wrb = queue_tail_node(txq);
+		be_dws_le_to_cpu(wrb, sizeof(*wrb));
+		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
+		if (busaddr != 0) {
+			pci_unmap_page(adapter->pdev, busaddr,
+				       wrb->frag_len, PCI_DMA_TODEVICE);
+		}
 		num_wrbs++;
 		queue_tail_inc(txq);
-	} while (cur_index != last_index);
+	}
 
 	atomic_sub(num_wrbs, &txq->used);
-	skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
+
 	kfree_skb(sent_skb);
 }
 
@@ -1377,6 +1397,7 @@ int be_poll_rx(struct napi_struct *napi, int budget)
 	struct be_eth_rx_compl *rxcp;
 	u32 work_done;
 
+	adapter->stats.drvr_stats.be_rx_polls++;
 	for (work_done = 0; work_done < budget; work_done++) {
 		rxcp = be_rx_compl_get(adapter);
 		if (!rxcp)
@@ -1475,6 +1496,14 @@ static void be_worker(struct work_struct *work)
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
+static void be_msix_disable(struct be_adapter *adapter)
+{
+	if (adapter->msix_enabled) {
+		pci_disable_msix(adapter->pdev);
+		adapter->msix_enabled = false;
+	}
+}
+
 static void be_msix_enable(struct be_adapter *adapter)
 {
 	int i, status;
@@ -1590,6 +1619,8 @@ static int be_open(struct net_device *netdev)
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
 	bool link_up;
 	int status;
+	u8 mac_speed;
+	u16 link_speed;
 
 	/* First time posting */
 	be_post_rx_frags(adapter);
@@ -1608,7 +1639,8 @@ static int be_open(struct net_device *netdev)
 	/* Rx compl queue may be in unarmed state; rearm it */
 	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
 
-	status = be_cmd_link_status_query(adapter, &link_up);
+	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+			&link_speed);
 	if (status)
 		goto ret_sts;
 	be_link_status_update(adapter, link_up);
@@ -1627,6 +1659,44 @@ ret_sts:
 	return status;
 }
 
+static int be_setup_wol(struct be_adapter *adapter, bool enable)
+{
+	struct be_dma_mem cmd;
+	int status = 0;
+	u8 mac[ETH_ALEN];
+
+	memset(mac, 0, ETH_ALEN);
+
+	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	if (cmd.va == NULL)
+		return -1;
+	memset(cmd.va, 0, cmd.size);
+
+	if (enable) {
+		status = pci_write_config_dword(adapter->pdev,
+			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+		if (status) {
+			dev_err(&adapter->pdev->dev,
+				"Could not enable Wake-on-LAN\n");
+			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
+					cmd.dma);
+			return status;
+		}
+		status = be_cmd_enable_magic_wol(adapter,
+				adapter->netdev->dev_addr, &cmd);
+		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
+		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
+	} else {
+		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
+		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+	}
+
+	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+	return status;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -1658,6 +1728,8 @@ static int be_setup(struct be_adapter *adapter)
 	if (status != 0)
 		goto rx_qs_destroy;
 
+	adapter->link_speed = -1;
+
 	return 0;
 
 rx_qs_destroy:
@@ -1678,6 +1750,8 @@ static int be_clear(struct be_adapter *adapter)
 
 	be_cmd_if_destroy(adapter, adapter->if_handle);
 
+	/* tell fw we're done with firing cmds */
+	be_cmd_fw_clean(adapter);
 	return 0;
 }
 
@@ -1720,6 +1794,31 @@ static int be_close(struct net_device *netdev)
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
 char flash_cookie[2][16] =	{"*** SE FLAS",
 				"H DIRECTORY *** "};
+
+static bool be_flash_redboot(struct be_adapter *adapter,
+			const u8 *p)
+{
+	u32 crc_offset;
+	u8 flashed_crc[4];
+	int status;
+	crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
+			+ sizeof(struct flash_file_hdr) - 32*1024;
+	p += crc_offset;
+	status = be_cmd_get_flash_crc(adapter, flashed_crc);
+	if (status) {
+		dev_err(&adapter->pdev->dev,
+		"could not get crc from flash, not flashing redboot\n");
+		return false;
+	}
+
+	/* update redboot only if crc does not match */
+	if (!memcmp(flashed_crc, p, 4))
+		return false;
+	else
+		return true;
+
+}
+
 static int be_flash_image(struct be_adapter *adapter,
 			const struct firmware *fw,
 			struct be_dma_mem *flash_cmd, u32 flash_type)
@@ -1759,6 +1858,12 @@ static int be_flash_image(struct be_adapter *adapter,
 		image_offset = FLASH_PXE_BIOS_START;
 		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
 		break;
+	case FLASHROM_TYPE_REDBOOT:
+		if (!be_flash_redboot(adapter, fw->data))
+			return 0;
+		image_offset = FLASH_REDBOOT_ISM_START;
+		image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
+		break;
 	default:
 		return 0;
 	}
@@ -1906,6 +2011,8 @@ static void be_netdev_init(struct net_device *netdev)
 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
 		NETIF_F_GRO;
 
+	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+
 	netdev->flags |= IFF_MULTICAST;
 
 	adapter->rx_csum = true;
@@ -1977,34 +2084,61 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
 	if (mem->va)
 		pci_free_consistent(adapter->pdev, mem->size,
 			mem->va, mem->dma);
+
+	mem = &adapter->mc_cmd_mem;
+	if (mem->va)
+		pci_free_consistent(adapter->pdev, mem->size,
+			mem->va, mem->dma);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
 {
 	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
 	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
+	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
 	int status;
 
 	status = be_map_pci_bars(adapter);
 	if (status)
-		return status;
+		goto done;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
 	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
 				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
 	if (!mbox_mem_alloc->va) {
-		be_unmap_pci_bars(adapter);
-		return -1;
+		status = -ENOMEM;
+		goto unmap_pci_bars;
 	}
+
 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
+
+	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
+	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
+			&mc_cmd_mem->dma);
+	if (mc_cmd_mem->va == NULL) {
+		status = -ENOMEM;
+		goto free_mbox;
+	}
+	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
+
 	spin_lock_init(&adapter->mbox_lock);
 	spin_lock_init(&adapter->mcc_lock);
 	spin_lock_init(&adapter->mcc_cq_lock);
 
 	return 0;
+
+free_mbox:
+	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
+		mbox_mem_alloc->va, mbox_mem_alloc->dma);
+
+unmap_pci_bars:
+	be_unmap_pci_bars(adapter);
+
+done:
+	return status;
 }
 
 static void be_stats_cleanup(struct be_adapter *adapter)
@@ -2032,6 +2166,7 @@ static int be_stats_init(struct be_adapter *adapter)
 static void __devexit be_remove(struct pci_dev *pdev)
 {
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
+
 	if (!adapter)
 		return;
 
@@ -2043,10 +2178,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
 
 	be_ctrl_cleanup(adapter);
 
-	if (adapter->msix_enabled) {
-		pci_disable_msix(adapter->pdev);
-		adapter->msix_enabled = false;
-	}
+	be_msix_disable(adapter);
 
 	pci_set_drvdata(pdev, NULL);
 	pci_release_regions(pdev);
@@ -2055,25 +2187,33 @@ static void __devexit be_remove(struct pci_dev *pdev)
 	free_netdev(adapter->netdev);
 }
 
-static int be_hw_up(struct be_adapter *adapter)
+static int be_get_config(struct be_adapter *adapter)
 {
 	int status;
+	u8 mac[ETH_ALEN];
 
-	status = be_cmd_POST(adapter);
+	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
 	if (status)
 		return status;
 
-	status = be_cmd_reset_function(adapter);
+	status = be_cmd_query_fw_cfg(adapter,
+				&adapter->port_num, &adapter->cap);
 	if (status)
 		return status;
 
-	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
+	memset(mac, 0, ETH_ALEN);
+	status = be_cmd_mac_addr_query(adapter, mac,
+			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
 	if (status)
 		return status;
 
-	status = be_cmd_query_fw_cfg(adapter,
-				&adapter->port_num, &adapter->cap);
-	return status;
+	if (!is_valid_ether_addr(mac))
+		return -EADDRNOTAVAIL;
+
+	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+	return 0;
 }
 
 static int __devinit be_probe(struct pci_dev *pdev,
@@ -2082,7 +2222,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	int status = 0;
 	struct be_adapter *adapter;
 	struct net_device *netdev;
-	u8 mac[ETH_ALEN];
 
 	status = pci_enable_device(pdev);
 	if (status)
@@ -2102,6 +2241,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	adapter->pdev = pdev;
 	pci_set_drvdata(pdev, adapter);
 	adapter->netdev = netdev;
+	be_netdev_init(netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
 
 	be_msix_enable(adapter);
 
@@ -2120,27 +2261,34 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto free_netdev;
 
-	status = be_stats_init(adapter);
+	/* sync up with fw's ready state */
+	status = be_cmd_POST(adapter);
 	if (status)
 		goto ctrl_clean;
 
-	status = be_hw_up(adapter);
+	/* tell fw we're ready to fire cmds */
+	status = be_cmd_fw_init(adapter);
 	if (status)
-		goto stats_clean;
+		goto ctrl_clean;
 
-	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
-			true /* permanent */, 0);
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		goto ctrl_clean;
+
+	status = be_stats_init(adapter);
+	if (status)
+		goto ctrl_clean;
+
+	status = be_get_config(adapter);
 	if (status)
 		goto stats_clean;
-	memcpy(netdev->dev_addr, mac, ETH_ALEN);
 
 	INIT_DELAYED_WORK(&adapter->work, be_worker);
-	be_netdev_init(netdev);
-	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
 
 	status = be_setup(adapter);
 	if (status)
 		goto stats_clean;
+
 	status = register_netdev(netdev);
 	if (status != 0)
 		goto unsetup;
@@ -2155,7 +2303,9 @@ stats_clean:
 ctrl_clean:
 	be_ctrl_cleanup(adapter);
 free_netdev:
+	be_msix_disable(adapter);
 	free_netdev(adapter->netdev);
+	pci_set_drvdata(pdev, NULL);
 rel_reg:
 	pci_release_regions(pdev);
 disable_dev:
@@ -2170,6 +2320,9 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev =  adapter->netdev;
 
+	if (adapter->wol)
+		be_setup_wol(adapter, true);
+
 	netif_device_detach(netdev);
 	if (netif_running(netdev)) {
 		rtnl_lock();
@@ -2200,6 +2353,11 @@ static int be_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
 
+	/* tell fw we're ready to fire cmds */
+	status = be_cmd_fw_init(adapter);
+	if (status)
+		return status;
+
 	be_setup(adapter);
 	if (netif_running(netdev)) {
 		rtnl_lock();
@@ -2207,6 +2365,9 @@ static int be_resume(struct pci_dev *pdev)
 		rtnl_unlock();
 	}
 	netif_device_attach(netdev);
+
+	if (adapter->wol)
+		be_setup_wol(adapter, false);
 	return 0;
 }
 
@@ -2221,8 +2382,8 @@ static struct pci_driver be_driver = {
 
 static int __init be_init_module(void)
 {
-	if (rx_frag_size != 8192 && rx_frag_size != 4096
-		&& rx_frag_size != 2048) {
+	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
+	    rx_frag_size != 2048) {
 		printk(KERN_WARNING DRV_NAME
 			" : Module param rx_frag_size must be 2048/4096/8192."
 			" Using 2048\n");
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 14bd3801f7d3..8ffea3990d07 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -554,8 +554,8 @@ static void adjust_tx_list(void)
 {
 	int timeout_cnt = MAX_TIMEOUT_CNT;
 
-	if (tx_list_head->status.status_word != 0
-	    && current_tx_ptr != tx_list_head) {
+	if (tx_list_head->status.status_word != 0 &&
+	    current_tx_ptr != tx_list_head) {
 		goto adjust_head;	/* released something, just return; */
 	}
 
@@ -567,8 +567,8 @@ static void adjust_tx_list(void)
 	if (current_tx_ptr->next->next == tx_list_head) {
 		while (tx_list_head->status.status_word == 0) {
 			udelay(10);
-			if (tx_list_head->status.status_word != 0
-			    || !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
+			if (tx_list_head->status.status_word != 0 ||
+			    !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
 				goto adjust_head;
 			}
 			if (timeout_cnt-- < 0) {
@@ -596,8 +596,8 @@ adjust_head:
 			       ": no sk_buff in a transmitted frame!\n");
 		}
 		tx_list_head = tx_list_head->next;
-	} while (tx_list_head->status.status_word != 0
-		 && current_tx_ptr != tx_list_head);
+	} while (tx_list_head->status.status_word != 0 &&
+		 current_tx_ptr != tx_list_head);
 	return;
 
 }
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 406f06424251..9b587c344194 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -438,8 +438,8 @@ bmac_init_phy(struct net_device *dev)
 
 		ctrl = bmac_mif_read(dev, 0);
 		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
-		if (bmac_mif_read(dev, 4) != capable
-		    || (ctrl & 0x1000) == 0) {
+		if (bmac_mif_read(dev, 4) != capable ||
+		    (ctrl & 0x1000) == 0) {
 			bmac_mif_write(dev, 4, capable);
 			bmac_mif_write(dev, 0, 0x1200);
 		} else
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6ff740..4bfc80812926 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -59,8 +59,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"2.0.2"
-#define DRV_MODULE_RELDATE	"Aug 21, 2009"
+#define DRV_MODULE_VERSION	"2.0.3"
+#define DRV_MODULE_RELDATE	"Dec 03, 2009"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j3.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j3.fw"
@@ -1466,6 +1466,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr |= BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1502,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED)
@@ -2811,13 +2815,21 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			}
 		}
 
-		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
 		last = tx_buf->nr_frags;
 
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
+
+			pci_unmap_page(bp->pdev,
+				pci_unmap_addr(
+					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+					mapping),
+				skb_shinfo(skb)->frags[i].size,
+				PCI_DMA_TODEVICE);
 		}
 
 		sw_cons = NEXT_TX_BD(sw_cons);
@@ -5146,8 +5158,12 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
-		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+			printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
+						"with %d/%d pages only\n",
+			       bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
 			break;
+		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_PG_RING_IDX(prod);
 	}
@@ -5155,8 +5171,12 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+			printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
+						"%d/%d skbs only\n",
+			       bp->dev->name, ring_num, i, bp->rx_ring_size);
 			break;
+		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_RING_IDX(prod);
 	}
@@ -5291,17 +5311,29 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		for (j = 0; j < TX_DESC_CNT; ) {
 			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
 			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
 
 			if (skb == NULL) {
 				j++;
 				continue;
 			}
 
-			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+			pci_unmap_single(bp->pdev,
+					 pci_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 
 			tx_buf->skb = NULL;
 
-			j += skb_shinfo(skb)->nr_frags + 1;
+			last = tx_buf->nr_frags;
+			j++;
+			for (k = 0; k < last; k++, j++) {
+				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+				pci_unmap_page(bp->pdev,
+					pci_unmap_addr(tx_buf, mapping),
+					skb_shinfo(skb)->frags[k].size,
+					PCI_DMA_TODEVICE);
+			}
 			dev_kfree_skb(skb);
 		}
 	}
@@ -5680,11 +5712,12 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	map = pci_map_single(bp->pdev, skb->data, pkt_size,
+		PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
-	map = skb_shinfo(skb)->dma_head;
 
 	REG_WR(bp, BNX2_HC_COMMAND,
 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -5719,7 +5752,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	udelay(5);
 
-	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -6238,8 +6271,11 @@ bnx2_reset_task(struct work_struct *work)
 {
 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
-	if (!netif_running(bp->dev))
+	rtnl_lock();
+	if (!netif_running(bp->dev)) {
+		rtnl_unlock();
 		return;
+	}
 
 	bnx2_netif_stop(bp);
 
@@ -6247,6 +6283,28 @@ bnx2_reset_task(struct work_struct *work)
 
 	atomic_set(&bp->intr_sem, 1);
 	bnx2_netif_start(bp);
+	rtnl_unlock();
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+
+	printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
+		atomic_read(&bp->intr_sem));
+	printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
+			    "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
+		REG_RD(bp, BNX2_EMAC_TX_STATUS),
+		REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+	printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+		dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
+		bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+	printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+		dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+	if (bp->flags & BNX2_FLAG_USING_MSIX)
+		printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
+			REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
 }
 
 static void
@@ -6254,6 +6312,8 @@ bnx2_tx_timeout(struct net_device *dev)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
+	bnx2_dump_state(bp);
+
 	/* This allows the netif to be shutdown gracefully before resetting */
 	schedule_work(&bp->reset_task);
 }
@@ -6298,7 +6358,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
 	struct netdev_queue *txq;
-	struct skb_shared_info *sp;
 
 	/*  Determine which tx ring we will be placed on */
 	i = skb_get_queue_mapping(skb);
@@ -6363,16 +6422,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else
 		mss = 0;
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	sp = skb_shinfo(skb);
-	mapping = sp->dma_head;
-
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
+	pci_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6393,7 +6451,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
-		mapping = sp->dma_maps[i];
+		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+			len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(bp->pdev, mapping))
+			goto dma_error;
+		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+				   mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6420,6 +6483,30 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	return NETDEV_TX_OK;
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	ring_prod = TX_RING_IDX(prod);
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = NULL;
+	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX_BD(prod);
+		ring_prod = TX_RING_IDX(prod);
+		tx_buf = &txr->tx_buf_ring[ring_prod];
+		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
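/*
 * Sketch of the mapping discipline the bnx2 and be2net hunks above move
 * to once skb_dma_map()/skb_dma_unmap() go away: map the linear head with
 * pci_map_single(), each page fragment with pci_map_page(), record every
 * handle, and on a later mapping failure unwind only what was already
 * mapped before dropping the skb. Ring bookkeeping is deliberately elided;
 * the helper name and the addrs[] array are hypothetical.
 */
static int example_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
			   dma_addr_t *addrs)
{
	int i;

	addrs[0] = pci_map_single(pdev, skb->data, skb_headlen(skb),
				  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, addrs[0]))
		return -EIO;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i + 1] = pci_map_page(pdev, frag->page,
					    frag->page_offset, frag->size,
					    PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, addrs[i + 1]))
			goto unwind;
	}
	return 0;

unwind:
	/* unmap only the fragments that were successfully mapped */
	while (--i >= 0)
		pci_unmap_page(pdev, addrs[i + 1],
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	pci_unmap_single(pdev, addrs[0], skb_headlen(skb), PCI_DMA_TODEVICE);
	return -EIO;
}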
 
 /* Called with rtnl_lock */
@@ -7635,6 +7722,86 @@ bnx2_get_pci_speed(struct bnx2 *bp)
 
 }
 
+static void __devinit
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+	int rc, i, v0_len = 0;
+	u8 *data;
+	u8 *v0_str = NULL;
+	bool mn_match = false;
+
+#define BNX2_VPD_NVRAM_OFFSET	0x300
+#define BNX2_VPD_LEN		128
+#define BNX2_MAX_VER_SLEN	30
+
+	data = kmalloc(256, GFP_KERNEL);
+	if (!data)
+		return;
+
+	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+			     BNX2_VPD_LEN);
+	if (rc)
+		goto vpd_done;
+
+	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+		data[i] = data[i + BNX2_VPD_LEN + 3];
+		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+		data[i + 3] = data[i + BNX2_VPD_LEN];
+	}
+
+	for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
+		unsigned char val = data[i];
+		unsigned int block_end;
+
+		if (val == 0x82 || val == 0x91) {
+			i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
+			continue;
+		}
+
+		if (val != 0x90)
+			goto vpd_done;
+
+		block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
+		i += 3;
+
+		if (block_end > BNX2_VPD_LEN)
+			goto vpd_done;
+
+		while (i < (block_end - 2)) {
+			int len = data[i + 2];
+
+			if (i + 3 + len > block_end)
+				goto vpd_done;
+
+			if (data[i] == 'M' && data[i + 1] == 'N') {
+				if (len != 4 ||
+				    memcmp(&data[i + 3], "1028", 4))
+					goto vpd_done;
+				mn_match = true;
+
+			} else if (data[i] == 'V' && data[i + 1] == '0') {
+				if (len > BNX2_MAX_VER_SLEN)
+					goto vpd_done;
+
+				v0_len = len;
+				v0_str = &data[i + 3];
+			}
+			i += 3 + len;
+
+			if (mn_match && v0_str) {
+				memcpy(bp->fw_version, v0_str, v0_len);
+				bp->fw_version[v0_len] = ' ';
+				goto vpd_done;
+			}
+		}
+		goto vpd_done;
+	}
+
+vpd_done:
+	kfree(data);
+}
+
 static int __devinit
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
@@ -7808,10 +7975,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		goto err_out_unmap;
 	}
 
+	bnx2_read_vpd_fw_ver(bp);
+
+	j = strlen(bp->fw_version);
 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
-	for (i = 0, j = 0; i < 3; i++) {
+	for (i = 0; i < 3 && j < 24; i++) {
 		u8 num, k, skip0;
 
+		if (i == 0) {
+			bp->fw_version[j++] = 'b';
+			bp->fw_version[j++] = 'c';
+			bp->fw_version[j++] = ' ';
+		}
 		num = (u8) (reg >> (24 - (i * 8)));
 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
 			if (num >= k || !skip0 || k == 1) {
@@ -7842,8 +8017,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
 
-		bp->fw_version[j++] = ' ';
-		for (i = 0; i < 3; i++) {
+		if (j < 32)
+			bp->fw_version[j++] = ' ';
+		for (i = 0; i < 3 && j < 28; i++) {
 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
 			reg = swab32(reg);
 			memcpy(&bp->fw_version[j], &reg, 4);
@@ -8264,6 +8440,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (netif_running(dev)) {
 		bnx2_set_power_state(bp, PCI_D0);
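
bnx2_read_vpd_fw_ver() above byte-swaps the NVRAM image and then walks the PCI VPD resource list: large resources 0x82/0x91 are skipped, and the read-only resource 0x90 is scanned for the 'MN' and 'V0' keywords before the version string is copied. A rough stand-alone C sketch of that keyword walk over an in-memory buffer (the sample data and helper names are made up for illustration):

#include <stdio.h>

/* Walk a VPD-R (tag 0x90) block and print each keyword/value pair.
 * 'vpd' is already byte-ordered for the host, as done by the swap loop above. */
static void walk_vpd(const unsigned char *vpd, int vpd_len)
{
	int i = 0;

	while (i <= vpd_len - 3) {
		unsigned char tag = vpd[i];
		int res_len = vpd[i + 1] | (vpd[i + 2] << 8);
		int end = i + 3 + res_len;

		if (tag == 0x82 || tag == 0x91) {	/* ID string / VPD-W: skip */
			i = end;
			continue;
		}
		if (tag != 0x90 || end > vpd_len)	/* only VPD-R is of interest */
			return;

		for (i += 3; i + 3 <= end; ) {
			int len = vpd[i + 2];

			if (i + 3 + len > end)
				return;
			printf("%c%c = %.*s\n", vpd[i], vpd[i + 1], len, &vpd[i + 3]);
			i += 3 + len;
		}
		return;
	}
}

int main(void)
{
	/* 0x82 ID-string resource, then a VPD-R block with MN and V0 keywords */
	const unsigned char vpd[] = {
		0x82, 0x04, 0x00, 'T', 'E', 'S', 'T',
		0x90, 0x0f, 0x00,
		'M', 'N', 0x04, '1', '0', '2', '8',
		'V', '0', 0x05, '5', '.', '2', '.', '7',
	};

	walk_vpd(vpd, sizeof(vpd));
	return 0;
}
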
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index a4d83409f205..939dc44d50a0 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6345,6 +6345,8 @@ struct l2_fhdr {
 
 #define BNX2_MCP_ROM					0x00150000
 #define BNX2_MCP_SCRATCH				0x00160000
+#define BNX2_MCP_STATE_P1				 0x0016f9c8
+#define BNX2_MCP_STATE_P0				 0x0016fdc8
 
 #define BNX2_SHM_HDR_SIGNATURE				BNX2_MCP_SCRATCH
 #define BNX2_SHM_HDR_SIGNATURE_SIG_MASK			 0xffff0000
@@ -6559,6 +6561,7 @@ struct sw_pg {
 
 struct sw_tx_bd {
 	struct sk_buff		*skb;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
 	unsigned short		is_gso;
 	unsigned short		nr_frags;
 };
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index bbf842284ebb..602ab86b6392 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,6 +24,10 @@
 #define BCM_VLAN			1
 #endif
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 
 #define BNX2X_MULTI_QUEUE
 
@@ -255,9 +259,6 @@ struct bnx2x_eth_q_stats {
 struct bnx2x_fastpath {
 
 	struct napi_struct	napi;
-
-	u8			is_rx_queue;
-
 	struct host_status_block *status_blk;
 	dma_addr_t		status_blk_mapping;
 
@@ -762,7 +763,11 @@ struct bnx2x_eth_stats {
 			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
 
+#ifdef BCM_CNIC
+#define MAX_CONTEXT			15
+#else
 #define MAX_CONTEXT			16
+#endif
 
 union cdu_context {
 	struct eth_context eth;
@@ -811,13 +816,21 @@ struct bnx2x {
 	struct bnx2x_fastpath	fp[MAX_CONTEXT];
 	void __iomem		*regview;
 	void __iomem		*doorbells;
+#ifdef BCM_CNIC
+#define BNX2X_DB_SIZE		(18*BCM_PAGE_SIZE)
+#else
 #define BNX2X_DB_SIZE		(16*BCM_PAGE_SIZE)
+#endif
 
 	struct net_device	*dev;
 	struct pci_dev		*pdev;
 
 	atomic_t		intr_sem;
+#ifdef BCM_CNIC
+	struct msix_entry	msix_table[MAX_CONTEXT+2];
+#else
 	struct msix_entry	msix_table[MAX_CONTEXT+1];
+#endif
 #define INT_MODE_INTx			1
 #define INT_MODE_MSI			2
 #define INT_MODE_MSIX			3
@@ -863,8 +876,8 @@ struct bnx2x {
 
 	/* Flags for marking that there is a STAT_QUERY or
 	   SET_MAC ramrod pending */
-	u8			stats_pending;
-	u8			set_mac_pending;
+	int			stats_pending;
+	int			set_mac_pending;
 
 	/* End of fields used in the performance code paths */
 
@@ -884,6 +897,7 @@ struct bnx2x {
 #define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
 #define HW_VLAN_TX_FLAG			0x400
 #define HW_VLAN_RX_FLAG			0x800
+#define MF_FUNC_DIS			0x1000
 
 	int			func;
 #define BP_PORT(bp)			(bp->func % PORT_MAX)
@@ -891,6 +905,11 @@ struct bnx2x {
 #define BP_E1HVN(bp)			(bp->func >> 1)
 #define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
 
+#ifdef BCM_CNIC
+#define BCM_CNIC_CID_START		16
+#define BCM_ISCSI_ETH_CL_ID		17
+#endif
+
 	int			pm_cap;
 	int			pcie_cap;
 	int			mrrs;
@@ -944,13 +963,11 @@ struct bnx2x {
 #define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
 #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
 #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
-#define BNX2X_STATE_DISABLED		0xd000
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000
 
 	int			multi_mode;
-	int			num_rx_queues;
-	int			num_tx_queues;
+	int			num_queues;
 
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -960,28 +977,51 @@ struct bnx2x {
 #define BNX2X_MAX_MULTICAST		64
 #define BNX2X_MAX_EMUL_MULTI		16
 
+	u32 			rx_mode_cl_mask;
+
 	dma_addr_t		def_status_blk_mapping;
 
 	struct bnx2x_slowpath	*slowpath;
 	dma_addr_t		slowpath_mapping;
 
-#ifdef BCM_ISCSI
-	void    		*t1;
-	dma_addr_t      	t1_mapping;
-	void    		*t2;
-	dma_addr_t      	t2_mapping;
-	void    		*timers;
-	dma_addr_t      	timers_mapping;
-	void    		*qm;
-	dma_addr_t      	qm_mapping;
-#endif
-
 	int			dropless_fc;
 
+#ifdef BCM_CNIC
+	u32			cnic_flags;
+#define BNX2X_CNIC_FLAG_MAC_SET		1
+
+	void			*t1;
+	dma_addr_t		t1_mapping;
+	void			*t2;
+	dma_addr_t		t2_mapping;
+	void			*timers;
+	dma_addr_t		timers_mapping;
+	void			*qm;
+	dma_addr_t		qm_mapping;
+	struct cnic_ops		*cnic_ops;
+	void			*cnic_data;
+	u32			cnic_tag;
+	struct cnic_eth_dev	cnic_eth_dev;
+	struct host_status_block *cnic_sb;
+	dma_addr_t		cnic_sb_mapping;
+#define CNIC_SB_ID(bp)			BP_L_ID(bp)
+	struct eth_spe		*cnic_kwq;
+	struct eth_spe		*cnic_kwq_prod;
+	struct eth_spe		*cnic_kwq_cons;
+	struct eth_spe		*cnic_kwq_last;
+	u16			cnic_kwq_pending;
+	u16			cnic_spq_pending;
+	struct mutex		cnic_mutex;
+	u8			iscsi_mac[6];
+#endif
+
 	int			dmae_ready;
 	/* used to synchronize dmae accesses */
 	struct mutex		dmae_mutex;
 
+	/* used to protect the FW mail box */
+	struct mutex		fw_mb_mutex;
+
 	/* used to synchronize stats collecting */
 	int			stats_state;
 	/* used by dmae command loader */
@@ -1030,20 +1070,15 @@ struct bnx2x {
 };
 
 
-#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/(2 * E1HVN_MAX)) \
-					      : (MAX_CONTEXT/2))
-#define BNX2X_NUM_QUEUES(bp)	(bp->num_rx_queues + bp->num_tx_queues)
-#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 2)
+#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
+					      : MAX_CONTEXT)
+#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
+#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
 
-#define for_each_rx_queue(bp, var) \
-			for (var = 0; var < bp->num_rx_queues; var++)
-#define for_each_tx_queue(bp, var) \
-			for (var = bp->num_rx_queues; \
-			     var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_queue(bp, var) \
 			for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_nondefault_queue(bp, var) \
-			for (var = 1; var < bp->num_rx_queues; var++)
+			for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
 
 
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1147,7 +1182,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
 
 
-#define BNX2X_BTR			3
+#define BNX2X_BTR			1
 #define MAX_SPQ_PENDING			8
 
 
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 8e2261fad485..52585338ada8 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -7,6 +7,20 @@
  * the Free Software Foundation.
  */
 
+struct license_key {
+	u32 reserved[6];
+
+#if defined(__BIG_ENDIAN)
+	u16 max_iscsi_init_conn;
+	u16 max_iscsi_trgt_conn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 max_iscsi_trgt_conn;
+	u16 max_iscsi_init_conn;
+#endif
+
+	u32 reserved_a[6];
+};
+
 
 #define PORT_0				0
 #define PORT_1				1
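
The new struct license_key declares its two u16 members in opposite order for big- and little-endian hosts so that both halves line up with the same 32-bit word in shared memory. A small stand-alone C illustration of that idiom using the compiler's predefined byte-order macros (the union and field names here are invented):

#include <stdint.h>
#include <stdio.h>

/* Same trick as the license_key layout: swap the declaration order of the
 * two 16-bit halves so a 32-bit view gives consistent results on any host. */
union word {
	uint32_t raw;
	struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		uint16_t hi;	/* occupies the high half of 'raw' */
		uint16_t lo;
#else
		uint16_t lo;
		uint16_t hi;
#endif
	} half;
};

int main(void)
{
	union word w = { .raw = 0x12345678 };

	/* Prints hi=0x1234 lo=0x5678 on both big- and little-endian machines. */
	printf("hi=0x%04x lo=0x%04x\n", w.half.hi, w.half.lo);
	return 0;
}
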
@@ -250,6 +264,7 @@ struct port_hw_cfg {			    /* port 0: 0x12c  port 1: 0x2bc */
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101	    0x00000800
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727	    0x00000900
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823	    0x00000b00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE	    0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN	    0x0000ff00
 
@@ -881,7 +896,7 @@ struct shmem_region {			       /*   SharedMem Offset (size) */
 
 	struct shm_dev_info	dev_info;		 /* 0x8     (0x438) */
 
-	u8			reserved[52*PORT_MAX];
+	struct license_key	drv_lic_key[PORT_MAX];	/* 0x440 (52*2=0x68) */
 
 	/* FW information (for internal FW use) */
 	u32			fw_info_fio_offset;    /* 0x4a8       (0x4) */
@@ -1245,8 +1260,8 @@ struct host_func_stats {
 
 
 #define BCM_5710_FW_MAJOR_VERSION			5
-#define BCM_5710_FW_MINOR_VERSION			0
-#define BCM_5710_FW_REVISION_VERSION			21
+#define BCM_5710_FW_MINOR_VERSION			2
+#define BCM_5710_FW_REVISION_VERSION			7
 #define BCM_5710_FW_ENGINEERING_VERSION 		0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index e32d3370862e..cf5778919b4b 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1107,18 +1107,21 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
 			      MDIO_REG_BANK_SERDES_DIGITAL,
 			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
 			      &control2);
-
-
-	control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
-
-
+	if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	else
+		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+		params->speed_cap_mask, control2);
 	CL45_WR_OVER_CL22(bp, params->port,
 			      params->phy_addr,
 			      MDIO_REG_BANK_SERDES_DIGITAL,
 			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
 			      control2);
 
-	if (phy_flags & PHY_XGXS_FLAG) {
+	if ((phy_flags & PHY_XGXS_FLAG) &&
+	     (params->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
 		DP(NETIF_MSG_LINK, "XGXS\n");
 
 		CL45_WR_OVER_CL22(bp, params->port,
@@ -1225,7 +1228,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
 				      params->phy_addr,
 				      MDIO_REG_BANK_CL73_USERB0,
 				    MDIO_CL73_USERB0_CL73_UCTRL,
-				    MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL);
+				      0xe);
 
 		/* Enable BAM Station Manager*/
 		CL45_WR_OVER_CL22(bp, params->port,
@@ -1236,29 +1239,25 @@ static void bnx2x_set_autoneg(struct link_params *params,
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
 
-		/* Merge CL73 and CL37 aneg resolution */
-		CL45_RD_OVER_CL22(bp, params->port,
-				      params->phy_addr,
-				      MDIO_REG_BANK_CL73_USERB0,
-				      MDIO_CL73_USERB0_CL73_BAM_CTRL3,
-				      &reg_val);
-
-		if (params->speed_cap_mask &
-		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
-			/* Set the CL73 AN speed */
+		/* Advertise CL73 link speeds */
 			CL45_RD_OVER_CL22(bp, params->port,
 					      params->phy_addr,
 					      MDIO_REG_BANK_CL73_IEEEB1,
 					      MDIO_CL73_IEEEB1_AN_ADV2,
 					      &reg_val);
+		if (params->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+		if (params->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
 
 			CL45_WR_OVER_CL22(bp, params->port,
 					      params->phy_addr,
 					      MDIO_REG_BANK_CL73_IEEEB1,
 					      MDIO_CL73_IEEEB1_AN_ADV2,
-			  reg_val | MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4);
+				      reg_val);
 
-		}
 		/* CL73 Autoneg Enabled */
 		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
 
@@ -1351,6 +1350,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
 
 static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
 {
+	struct bnx2x *bp = params->bp;
 	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
 	/* resolve pause mode and advertisement
 	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
@@ -1380,18 +1380,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
 		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
 		break;
 	}
+	DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
 }
 
 static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
 					   u16 ieee_fc)
 {
 	struct bnx2x *bp = params->bp;
+	u16 val;
 	/* for AN, we are always publishing full duplex */
 
 	CL45_WR_OVER_CL22(bp, params->port,
 			      params->phy_addr,
 			      MDIO_REG_BANK_COMBO_IEEE0,
 			      MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_CL73_IEEEB1,
+			      MDIO_CL73_IEEEB1_AN_ADV1, &val);
+	val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
+	val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+	CL45_WR_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_CL73_IEEEB1,
+			      MDIO_CL73_IEEEB1_AN_ADV1, val);
 }
 
 static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
@@ -1609,6 +1621,39 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
 	return ret;
 }
 
+static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 pd_10g, status2_1000x;
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_SERDES_DIGITAL,
+			      MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			      &status2_1000x);
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_SERDES_DIGITAL,
+			      MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			      &status2_1000x);
+	if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+		DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_10G_PARALLEL_DETECT,
+			      MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+			      &pd_10g);
+
+	if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+		DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+	return 0;
+}
 
 static void bnx2x_flow_ctrl_resolve(struct link_params *params,
 				  struct link_vars *vars,
@@ -1627,21 +1672,53 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
 	    (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
 	    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
 	     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
-		CL45_RD_OVER_CL22(bp, params->port,
-				      params->phy_addr,
-				      MDIO_REG_BANK_COMBO_IEEE0,
-				      MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
-				      &ld_pause);
-		CL45_RD_OVER_CL22(bp, params->port,
-				      params->phy_addr,
-			MDIO_REG_BANK_COMBO_IEEE0,
-			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
-			&lp_pause);
-		pause_result = (ld_pause &
+		if (bnx2x_direct_parallel_detect_used(params)) {
+			vars->flow_ctrl = params->req_fc_auto_adv;
+			return;
+		}
+		if ((gp_status &
+		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+			CL45_RD_OVER_CL22(bp, params->port,
+					      params->phy_addr,
+					      MDIO_REG_BANK_CL73_IEEEB1,
+					      MDIO_CL73_IEEEB1_AN_ADV1,
+					      &ld_pause);
+			CL45_RD_OVER_CL22(bp, params->port,
+					     params->phy_addr,
+					     MDIO_REG_BANK_CL73_IEEEB1,
+					     MDIO_CL73_IEEEB1_AN_LP_ADV1,
+					     &lp_pause);
+			pause_result = (ld_pause &
+					MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
+					>> 8;
+			pause_result |= (lp_pause &
+					MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
+					>> 10;
+			DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
+				 pause_result);
+		} else {
+
+			CL45_RD_OVER_CL22(bp, params->port,
+					      params->phy_addr,
+					      MDIO_REG_BANK_COMBO_IEEE0,
+					      MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+					      &ld_pause);
+			CL45_RD_OVER_CL22(bp, params->port,
+			       params->phy_addr,
+			       MDIO_REG_BANK_COMBO_IEEE0,
+			       MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+			       &lp_pause);
+			pause_result = (ld_pause &
 				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
-		pause_result |= (lp_pause &
+			pause_result |= (lp_pause &
 				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
-		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
+			DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
+				 pause_result);
+		}
 		bnx2x_pause_resolve(vars, pause_result);
 	} else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
 		   (bnx2x_ext_phy_resolve_fc(params, vars))) {
@@ -1853,6 +1930,8 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
 		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
 		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
 		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
+		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
 		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
 			vars->autoneg = AUTO_NEG_ENABLED;
 
@@ -1987,8 +2066,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
 		    GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
 		    mode);
 
-	bnx2x_set_led(bp, params->port, LED_MODE_OPER,
-		    line_speed, params->hw_led_mode, params->chip_id);
+	bnx2x_set_led(params, LED_MODE_OPER, line_speed);
 	return 0;
 }
 
@@ -2122,6 +2200,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
 				       MDIO_PMA_REG_CTRL,
 				       1<<15);
 			break;
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+			break;
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
 			DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
 			break;
@@ -2512,16 +2592,11 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
 	/* Need to wait 100ms after reset */
 	msleep(100);
 
-	/* Set serial boot control for external load */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
-
 	/* Micro controller re-boot */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+		       0x018B);
 
 	/* Set soft reset */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
@@ -2529,14 +2604,10 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
-	/* Set PLL register value to be same like in P13 ver */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_PLL_CTRL,
-		       0x73A0);
+		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
-	/* Clear soft reset.
-	Will automatically reset micro-controller re-boot */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
@@ -3462,8 +3533,8 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
 		       MDIO_PMA_REG_8481_LINK_SIGNAL,
 		       &val1);
 	/* Set bit 2 to 0, and bits [1:0] to 10 */
-	val1 &= ~((1<<0) | (1<<2)); /* Clear bits 0,2*/
-	val1 |= (1<<1); /* Set bit 1 */
+	val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
+	val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
 
 	bnx2x_cl45_write(bp, params->port,
 		       ext_phy_type,
@@ -3497,36 +3568,19 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
 		       MDIO_PMA_REG_8481_LED2_MASK,
 		       0);
 
-	/* LED3 (10G/1G/100/10G Activity) */
-	bnx2x_cl45_read(bp, params->port,
-		      ext_phy_type,
-		      ext_phy_addr,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_8481_LINK_SIGNAL,
-		      &val1);
-	/* Enable blink based on source 4(Activity) */
-	val1 &= ~((1<<7) | (1<<8)); /* Clear bits 7,8 */
-	val1 |= (1<<6); /* Set only bit 6 */
+	/* Unmask LED3 for 10G link */
 	bnx2x_cl45_write(bp, params->port,
 		       ext_phy_type,
 		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_8481_LINK_SIGNAL,
-		       val1);
-
-	bnx2x_cl45_read(bp, params->port,
-		      ext_phy_type,
-		      ext_phy_addr,
-		      MDIO_PMA_DEVAD,
 		      MDIO_PMA_REG_8481_LED3_MASK,
-		      &val1);
-	val1 |= (1<<4); /* Unmask LED3 for 10G link */
+		       0x6);
 	bnx2x_cl45_write(bp, params->port,
 		       ext_phy_type,
 		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_8481_LED3_MASK,
-		       val1);
+		       MDIO_PMA_REG_8481_LED3_BLINK,
+		       0);
 }
 
 
@@ -3544,7 +3598,10 @@ static void bnx2x_init_internal_phy(struct link_params *params,
 			bnx2x_set_preemphasis(params);
 
 		/* forced speed requested? */
-		if (vars->line_speed != SPEED_AUTO_NEG) {
+		if (vars->line_speed != SPEED_AUTO_NEG ||
+		    ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+			  params->loopback_mode == LOOPBACK_EXT)) {
 			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 
 			/* disable autoneg */
@@ -3693,19 +3750,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 				}
 			}
 			/* Force speed */
-			/* First enable LASI */
-			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_RX_ALARM_CTRL,
-				       0x0400);
-			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_LASI_CTRL, 0x0004);
-
 			if (params->req_line_speed == SPEED_10000) {
 				DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
 
@@ -3715,6 +3759,9 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 					       MDIO_PMA_DEVAD,
 					       MDIO_PMA_REG_DIGITAL_CTRL,
 					       0x400);
+				bnx2x_cl45_write(bp, params->port, ext_phy_type,
+					       ext_phy_addr, MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_LASI_CTRL, 1);
 			} else {
 				/* Force 1Gbps using autoneg with 1G
 				advertisement */
@@ -3756,6 +3803,17 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 					       MDIO_AN_DEVAD,
 					       MDIO_AN_REG_CTRL,
 					       0x1200);
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type,
+					       ext_phy_addr,
+					       MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_RX_ALARM_CTRL,
+					       0x0400);
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type,
+					       ext_phy_addr,
+					       MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_LASI_CTRL, 0x0004);
 
 			}
 			bnx2x_save_bcm_spirom_ver(bp, params->port,
@@ -4291,6 +4349,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 			break;
 		}
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 			/* This phy uses the NIG latch mechanism since link
 				indication arrives through its LED4 and not via
 				its LASI signal, so we get steady signal
@@ -4298,6 +4357,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 			bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 				    1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
+			bnx2x_cl45_write(bp, params->port,
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+				       ext_phy_addr,
+				       MDIO_PMA_DEVAD,
+				       MDIO_PMA_REG_CTRL, 0x0000);
+
 			bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
 			if (params->req_line_speed == SPEED_AUTO_NEG) {
 
@@ -4394,17 +4459,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
 					DP(NETIF_MSG_LINK, "Advertising 10G\n");
 					/* Restart autoneg for 10G*/
-			bnx2x_cl45_read(bp, params->port,
-				      ext_phy_type,
-				      ext_phy_addr,
-				      MDIO_AN_DEVAD,
-				      MDIO_AN_REG_CTRL, &val);
-			val |= 0x200;
+
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_addr,
 				       MDIO_AN_DEVAD,
-				       MDIO_AN_REG_CTRL, val);
+				       MDIO_AN_REG_CTRL, 0x3200);
 				}
 			} else {
 				/* Force speed */
@@ -4657,8 +4717,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
 				      0xc809, &val1);
 
 			DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
-			ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9))
-					   && ((val1 & (1<<8)) == 0));
+			ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
+					   ((val1 & (1<<8)) == 0));
 			if (ext_phy_link_up)
 				vars->line_speed = SPEED_10000;
 			break;
@@ -5148,6 +5208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
 			}
 			break;
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 			/* Check 10G-BaseT link status */
 			/* Check PMD signal ok */
 			bnx2x_cl45_read(bp, params->port, ext_phy_type,
@@ -5363,8 +5424,10 @@ static void bnx2x_link_int_ack(struct link_params *params,
 		     (NIG_STATUS_XGXS0_LINK10G |
 		      NIG_STATUS_XGXS0_LINK_STATUS |
 		      NIG_STATUS_SERDES0_LINK_STATUS));
-	if (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
-	    == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) {
+	if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
+		== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
+	(XGXS_EXT_PHY_TYPE(params->ext_phy_config)
+		== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
 		bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
 	}
 	if (vars->phy_link_up) {
@@ -5477,6 +5540,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 		status = bnx2x_format_ver(spirom_ver, version, len);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 		spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
 			(spirom_ver & 0x7F);
 		status = bnx2x_format_ver(spirom_ver, version, len);
@@ -5728,13 +5792,15 @@ u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
 }
 
 
-u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
-	       u16 hw_led_mode, u32 chip_id)
+u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
 {
+	u8 port = params->port;
+	u16 hw_led_mode = params->hw_led_mode;
 	u8 rc = 0;
 	u32 tmp;
 	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
+	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
 	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
 		 speed, hw_led_mode);
@@ -5749,7 +5815,14 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
 		break;
 
 	case LED_MODE_OPER:
-		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
+		if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+		} else {
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+				   hw_led_mode);
+		}
+
 		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
 			   port*4, 0);
 		/* Set blinking rate to ~15.9Hz */
@@ -5761,7 +5834,7 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
 		EMAC_WR(bp, EMAC_REG_EMAC_LED,
 			    (tmp & (~EMAC_LED_OVERRIDE)));
 
-		if (!CHIP_IS_E1H(bp) &&
+		if (CHIP_IS_E1(bp) &&
 		    ((speed == SPEED_2500) ||
 		     (speed == SPEED_1000) ||
 		     (speed == SPEED_100) ||
@@ -5864,6 +5937,7 @@ static u8 bnx2x_link_initialize(struct link_params *params,
 
 	if (non_ext_phy ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
+	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
 	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
 		if (params->req_line_speed == SPEED_AUTO_NEG)
@@ -6030,10 +6104,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
 			    params->port*4, 0);
 
-		bnx2x_set_led(bp, params->port, LED_MODE_OPER,
-			    vars->line_speed, params->hw_led_mode,
-			    params->chip_id);
-
+		bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
 	} else
 	/* No loopback */
 	{
@@ -6091,15 +6162,13 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 {
 	struct bnx2x *bp = params->bp;
 	u32 ext_phy_config = params->ext_phy_config;
-	u16 hw_led_mode = params->hw_led_mode;
-	u32 chip_id = params->chip_id;
 	u8 port = params->port;
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
 	u32 val = REG_RD(bp, params->shmem_base +
 			     offsetof(struct shmem_region, dev_info.
 				      port_feature_config[params->port].
 				      config));
-
+	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 	/* disable attentions */
 	vars->link_status = 0;
 	bnx2x_update_mng(params, vars->link_status);
@@ -6127,7 +6196,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 	 * Hold it as vars low
 	 */
 	 /* clear link led */
-	bnx2x_set_led(bp, port, LED_MODE_OFF, 0, hw_led_mode, chip_id);
+	bnx2x_set_led(params, LED_MODE_OFF, 0);
 	if (reset_ext_phy) {
 		switch (ext_phy_type) {
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
@@ -6163,6 +6232,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 			bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
 			break;
 		}
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		{
+			u8 ext_phy_addr =
+				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+			bnx2x_cl45_write(bp, port,
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+				       ext_phy_addr,
+				       MDIO_AN_DEVAD,
+				       MDIO_AN_REG_CTRL, 0x0000);
+			bnx2x_cl45_write(bp, port,
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+				       ext_phy_addr,
+				       MDIO_PMA_DEVAD,
+				       MDIO_PMA_REG_CTRL, 1);
+			break;
+		}
 		default:
 			/* HW reset */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
@@ -6198,9 +6283,7 @@ static u8 bnx2x_update_link_down(struct link_params *params,
 	u8 port = params->port;
 
 	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
-	bnx2x_set_led(bp, port, LED_MODE_OFF,
-		    0, params->hw_led_mode,
-		    params->chip_id);
+	bnx2x_set_led(params, LED_MODE_OFF, 0);
 
 	/* indicate no mac active */
 	vars->mac_type = MAC_TYPE_NONE;
@@ -6237,15 +6320,13 @@ static u8 bnx2x_update_link_up(struct link_params *params,
 	vars->link_status |= LINK_STATUS_LINK_UP;
 	if (link_10g) {
 		bnx2x_bmac_enable(params, vars, 0);
-		bnx2x_set_led(bp, port, LED_MODE_OPER,
-			    SPEED_10000, params->hw_led_mode,
-			    params->chip_id);
-
+		bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
 	} else {
-		bnx2x_emac_enable(params, vars, 0);
 		rc = bnx2x_emac_program(params, vars->line_speed,
 				      vars->duplex);
 
+		bnx2x_emac_enable(params, vars, 0);
+
 		/* AN complete? */
 		if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
 			if (!(vars->phy_flags &
@@ -6343,6 +6424,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 
 	if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
 	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
+	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
 	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
 	    (ext_phy_link_up && !vars->phy_link_up))
 		bnx2x_init_internal_phy(params, vars, 0);
@@ -6578,6 +6660,13 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 	return 0;
 }
 
+
+static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, 1);
+	return 0;
+}
 u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 {
 	u8 rc = 0;
@@ -6607,7 +6696,9 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 		/* GPIO1 affects both ports, so there's need to pull
 		it for single port alone */
 		rc = bnx2x_8726_common_init_phy(bp, shmem_base);
-
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		rc = bnx2x_84823_common_init_phy(bp, shmem_base);
 		break;
 	default:
 		DP(NETIF_MSG_LINK,
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index f3e252264e1b..40c2981de8ed 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -178,8 +178,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
    Basically, the CLC takes care of the led for the link, but in case one needs
    to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
-u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
-	       u16 hw_led_mode, u32 chip_id);
+u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
 #define LED_MODE_OFF	0
 #define LED_MODE_OPER 	2
 
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 20f0ed956df2..77ba13520d87 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -49,6 +49,7 @@
 #include <linux/prefetch.h>
 #include <linux/zlib.h>
 #include <linux/io.h>
+#include <linux/stringify.h>
 
 
 #include "bnx2x.h"
@@ -56,15 +57,20 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1"
-#define DRV_MODULE_RELDATE	"2009/08/12"
+#define DRV_MODULE_VERSION	"1.52.1-5"
+#define DRV_MODULE_RELDATE	"2009/11/09"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
 #include "bnx2x_fw_file_hdr.h"
 /* FW files */
-#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
-#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
+#define FW_FILE_VERSION					\
+	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
+	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
 
 /* Time in jiffies before concluding the transmitter is hung */
 #define TX_TIMEOUT		(5*HZ)
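
FW_FILE_NAME_E1/E1H are now composed at compile time by stringifying the firmware version macros and letting adjacent string literals concatenate. A tiny stand-alone C example of the same two-level stringification, with local STR macros standing in for linux/stringify.h:

#include <stdio.h>

#define FW_MAJOR	5
#define FW_MINOR	2
#define FW_REV		7
#define FW_ENG		0

/* Two levels are needed so macro arguments expand before '#' stringifies them. */
#define STR_1(x)	#x
#define STR(x)		STR_1(x)

#define FW_VERSION	STR(FW_MAJOR) "." STR(FW_MINOR) "." STR(FW_REV) "." STR(FW_ENG)
#define FW_FILE_NAME	"bnx2x-e1-" FW_VERSION ".fw"

int main(void)
{
	puts(FW_FILE_NAME);	/* prints: bnx2x-e1-5.2.7.0.fw */
	return 0;
}
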
@@ -77,21 +83,18 @@ MODULE_AUTHOR("Eliezer Tamir");
 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 
 static int multi_mode = 1;
 module_param(multi_mode, int, 0);
 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
 			     "(0 Disable; 1 Enable (default))");
 
-static int num_rx_queues;
-module_param(num_rx_queues, int, 0);
-MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
-				" (default is half number of CPUs)");
-
-static int num_tx_queues;
-module_param(num_tx_queues, int, 0);
-MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
-				" (default is half number of CPUs)");
+static int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+				" (default is the number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -550,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
@@ -567,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	}
 
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
@@ -582,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
 	/* Rings */
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -616,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	}
 
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -742,6 +745,9 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 	if (msix) {
 		synchronize_irq(bp->msix_table[0].vector);
 		offset = 1;
+#ifdef BCM_CNIC
+		offset++;
+#endif
 		for_each_queue(bp, i)
 			synchronize_irq(bp->msix_table[i + offset].vector);
 	} else
@@ -781,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 	barrier();
 }
 
-static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	struct host_status_block *fpsb = fp->status_blk;
-	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
-		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
-		rc |= 1;
-	}
-	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
-		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-		rc |= 2;
-	}
-	return rc;
+	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
 }
 
 static u16 bnx2x_ack_int(struct bnx2x *bp)
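
bnx2x_update_fpsb_idx() and the new bnx2x_has_tx_work() re-read indices that the chip updates behind the compiler's back, hence the barrier() before each load. A stand-alone C sketch of the same polling idea, with a second thread standing in for the hardware and an acquire load standing in for barrier() plus the status-block read (names are invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Models a status-block index that "hardware" (here: another thread) advances. */
static _Atomic unsigned short hw_cons;
static unsigned short sw_cons;

static void *hardware(void *arg)
{
	(void)arg;
	usleep(1000);				/* chip completes some packets... */
	atomic_store_explicit(&hw_cons, 5, memory_order_release);
	return NULL;
}

static int has_tx_work(void)
{
	/* Force a fresh load on every poll, as barrier() plus the status-block
	 * read do in bnx2x_has_tx_work() above. */
	unsigned short cons = atomic_load_explicit(&hw_cons, memory_order_acquire);

	return cons != sw_cons;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hardware, NULL);
	while (!has_tx_work())
		;				/* poll until the consumer index moves */
	printf("tx work available: hw_cons=%u sw_cons=%u\n",
	       (unsigned)atomic_load(&hw_cons), (unsigned)sw_cons);
	pthread_join(t, NULL);
	return 0;
}

Built with -pthread, the loop only exits once the simulated chip advances hw_cons, mirroring how the poll routine rechecks the status block rather than a stale cached value.
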
@@ -835,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
 
+	/* prefetch skb end pointer to speedup dev_kfree_skb() */
+	prefetch(&skb->end);
+
 	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
 	   idx, tx_buf, skb);
 
@@ -879,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 	/* release skb */
 	WARN_ON(!skb);
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;
 
@@ -909,19 +910,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
 	return (s16)(fp->bp->tx_ring_size) - used;
 }
 
-static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
+static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+	return hw_cons != fp->tx_pkt_cons;
+}
+
+static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
 {
 	struct bnx2x *bp = fp->bp;
 	struct netdev_queue *txq;
 	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-	int done = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
-		return;
+		return -1;
 #endif
 
-	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
+	txq = netdev_get_tx_queue(bp->dev, fp->index);
 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
 	sw_cons = fp->tx_pkt_cons;
 
@@ -942,7 +952,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 */
 		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
 		sw_cons++;
-		done++;
 	}
 
 	fp->tx_pkt_cons = sw_cons;
@@ -964,8 +973,12 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_tx_wake_queue(txq);
 	}
+	return 0;
 }
 
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
+#endif
 
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 			   union eth_rx_cqe *rr_cqe)
@@ -1022,16 +1035,24 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
 		break;
 
+#ifdef BCM_CNIC
+	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
+		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
+		bnx2x_cnic_cfc_comp(bp, cid);
+		break;
+#endif
 
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-		bp->set_mac_pending = 0;
+		bp->set_mac_pending--;
+		smp_wmb();
 		break;
 
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
 		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
+		bp->set_mac_pending--;
+		smp_wmb();
 		break;
 
 	default:
@@ -1539,6 +1560,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		} else {
 			rx_buf = &fp->rx_buf_ring[bd_cons];
 			skb = rx_buf->skb;
+			prefetch(skb);
+			prefetch((u8 *)skb + 256);
 			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
 			pad = cqe->fast_path_cqe.placement_offset;
 
@@ -1720,27 +1743,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	if (unlikely(bp->panic))
 		return IRQ_HANDLED;
 #endif
-	/* Handle Rx or Tx according to MSI-X vector */
-	if (fp->is_rx_queue) {
-		prefetch(fp->rx_cons_sb);
-		prefetch(&fp->status_blk->u_status_block.status_block_index);
 
-		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-	} else {
-		prefetch(fp->tx_cons_sb);
-		prefetch(&fp->status_blk->c_status_block.status_block_index);
-
-		bnx2x_update_fpsb_idx(fp);
-		rmb();
-		bnx2x_tx_int(fp);
-
-		/* Re-enable interrupts */
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
-	}
+	/* Handle Rx and Tx according to MSI-X vector */
+	prefetch(fp->rx_cons_sb);
+	prefetch(fp->tx_cons_sb);
+	prefetch(&fp->status_blk->u_status_block.status_block_index);
+	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
 	return IRQ_HANDLED;
 }
@@ -1775,35 +1784,32 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 
 		mask = 0x2 << fp->sb_id;
 		if (status & mask) {
-			/* Handle Rx or Tx according to SB id */
-			if (fp->is_rx_queue) {
-				prefetch(fp->rx_cons_sb);
-				prefetch(&fp->status_blk->u_status_block.
-							status_block_index);
-
-				napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-			} else {
-				prefetch(fp->tx_cons_sb);
-				prefetch(&fp->status_blk->c_status_block.
-							status_block_index);
-
-				bnx2x_update_fpsb_idx(fp);
-				rmb();
-				bnx2x_tx_int(fp);
-
-				/* Re-enable interrupts */
-				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-					     le16_to_cpu(fp->fp_u_idx),
-					     IGU_INT_NOP, 1);
-				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-					     le16_to_cpu(fp->fp_c_idx),
-					     IGU_INT_ENABLE, 1);
-			}
+			/* Handle Rx and Tx according to SB id */
+			prefetch(fp->rx_cons_sb);
+			prefetch(&fp->status_blk->u_status_block.
+						status_block_index);
+			prefetch(fp->tx_cons_sb);
+			prefetch(&fp->status_blk->c_status_block.
+						status_block_index);
+			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
 		}
 	}
 
+#ifdef BCM_CNIC
+	mask = 0x2 << CNIC_SB_ID(bp);
+	if (status & (mask | 0x1)) {
+		struct cnic_ops *c_ops = NULL;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+
+		status &= ~mask;
+	}
+#endif
 
 	if (unlikely(status & 0x1)) {
 		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -2128,18 +2134,30 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
 
 static void bnx2x_link_report(struct bnx2x *bp)
 {
-	if (bp->state == BNX2X_STATE_DISABLED) {
+	if (bp->flags & MF_FUNC_DIS) {
 		netif_carrier_off(bp->dev);
 		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
 		return;
 	}
 
 	if (bp->link_vars.link_up) {
+		u16 line_speed;
+
 		if (bp->state == BNX2X_STATE_OPEN)
 			netif_carrier_on(bp->dev);
 		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
 
-		printk("%d Mbps ", bp->link_vars.line_speed);
+		line_speed = bp->link_vars.line_speed;
+		if (IS_E1HMF(bp)) {
+			u16 vn_max_rate;
+
+			vn_max_rate =
+				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
+				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+			if (vn_max_rate < line_speed)
+				line_speed = vn_max_rate;
+		}
+		printk("%d Mbps ", line_speed);
 
 		if (bp->link_vars.duplex == DUPLEX_FULL)
 			printk("full duplex");
@@ -2304,8 +2322,14 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 	}
 
 	/* ... only if all min rates are zeros - disable fairness */
-	if (all_zero)
-		bp->vn_weight_sum = 0;
+	if (all_zero) {
+		bp->cmng.flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+		   "  fairness will be disabled\n");
+	} else
+		bp->cmng.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
@@ -2324,17 +2348,14 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 	} else {
 		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If fairness is enabled (not all min rates are zeroes) and
-		   if current min rate is zero - set it to 1.
-		   This is a requirement of the algorithm. */
-		if (bp->vn_weight_sum && (vn_min_rate == 0))
+		/* If min rate is zero - set it to 1 */
+		if (!vn_min_rate)
 			vn_min_rate = DEF_MIN_RATE;
 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
 				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
 	}
-
 	DP(NETIF_MSG_IFUP,
-	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
+	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
 	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
 
 	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
@@ -2405,8 +2426,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 			memset(&(pstats->mac_stx[0]), 0,
 			       sizeof(struct mac_stx));
 		}
-		if ((bp->state == BNX2X_STATE_OPEN) ||
-		    (bp->state == BNX2X_STATE_DISABLED))
+		if (bp->state == BNX2X_STATE_OPEN)
 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}
 
@@ -2449,9 +2469,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 
 static void bnx2x__link_status_update(struct bnx2x *bp)
 {
-	int func = BP_FUNC(bp);
-
-	if (bp->state != BNX2X_STATE_OPEN)
+	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
 		return;
 
 	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -2461,7 +2479,6 @@ static void bnx2x__link_status_update(struct bnx2x *bp)
 	else
 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 	bnx2x_calc_vn_weight_sum(bp);
 
 	/* indicate link status */
@@ -2501,6 +2518,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 	u32 cnt = 1;
 	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 
+	mutex_lock(&bp->fw_mb_mutex);
 	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
 	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
@@ -2510,8 +2528,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 
 		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
 
-		/* Give the FW up to 2 second (200*10ms) */
-	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+		/* Give the FW up to 5 seconds (500*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
 
 	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
 	   cnt*delay, rc, seq);
@@ -2525,32 +2543,23 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 		bnx2x_fw_dump(bp);
 		rc = 0;
 	}
+	mutex_unlock(&bp->fw_mb_mutex);
 
 	return rc;
 }
 
 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
+static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
 static void bnx2x_set_rx_mode(struct net_device *dev);
 
 static void bnx2x_e1h_disable(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
-	int i;
-
-	bp->rx_mode = BNX2X_RX_MODE_NONE;
-	bnx2x_set_storm_rx_mode(bp);
 
 	netif_tx_disable(bp->dev);
-	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
 
 	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
-	bnx2x_set_mac_addr_e1h(bp, 0);
-
-	for (i = 0; i < MC_HASH_SIZE; i++)
-		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-
 	netif_carrier_off(bp->dev);
 }
 
@@ -2560,13 +2569,13 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
 
 	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 
-	bnx2x_set_mac_addr_e1h(bp, 1);
-
 	/* Tx queue should be only reenabled */
 	netif_tx_wake_all_queues(bp->dev);
 
-	/* Initialize the receive filter. */
-	bnx2x_set_rx_mode(bp->dev);
+	/*
+	 * Should not call netif_carrier_on since it will be called if the link
+	 * is up when checking for link state
+	 */
 }
 
 static void bnx2x_update_min_max(struct bnx2x *bp)
@@ -2605,21 +2614,23 @@ static void bnx2x_update_min_max(struct bnx2x *bp)
 
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
-	int func = BP_FUNC(bp);
-
 	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
-	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 
 	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
 
+		/*
+		 * This is the only place besides the function initialization
+		 * where the bp->flags can change so it is done without any
+		 * locks
+		 */
 		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
 			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
-			bp->state = BNX2X_STATE_DISABLED;
+			bp->flags |= MF_FUNC_DIS;
 
 			bnx2x_e1h_disable(bp);
 		} else {
 			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
-			bp->state = BNX2X_STATE_OPEN;
+			bp->flags &= ~MF_FUNC_DIS;
 
 			bnx2x_e1h_enable(bp);
 		}
@@ -2638,11 +2649,40 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 }
 
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+	struct eth_spe *next_spe = bp->spq_prod_bd;
+
+	if (bp->spq_prod_bd == bp->spq_last_bd) {
+		bp->spq_prod_bd = bp->spq;
+		bp->spq_prod_idx = 0;
+		DP(NETIF_MSG_TIMER, "end of spq\n");
+	} else {
+		bp->spq_prod_bd++;
+		bp->spq_prod_idx++;
+	}
+	return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	/* Make sure that BD data is updated before writing the producer */
+	wmb();
+
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+	       bp->spq_prod_idx);
+	mmiowb();
+}
+
 /* the slow path queue is odd since completions arrive on the fastpath ring */
 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 			 u32 data_hi, u32 data_lo, int common)
 {
-	int func = BP_FUNC(bp);
+	struct eth_spe *spe;
 
 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
 	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
@@ -2664,38 +2704,23 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		return -EBUSY;
 	}
 
+	spe = bnx2x_sp_get_next(bp);
+
 	/* CID needs port number to be encoded in it */
-	bp->spq_prod_bd->hdr.conn_and_cmd_data =
+	spe->hdr.conn_and_cmd_data =
 			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
 				     HW_CID(bp, cid)));
-	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
 	if (common)
-		bp->spq_prod_bd->hdr.type |=
+		spe->hdr.type |=
 			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
 
-	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
-	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
+	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
 
 	bp->spq_left--;
 
-	if (bp->spq_prod_bd == bp->spq_last_bd) {
-		bp->spq_prod_bd = bp->spq;
-		bp->spq_prod_idx = 0;
-		DP(NETIF_MSG_TIMER, "end of spq\n");
-
-	} else {
-		bp->spq_prod_bd++;
-		bp->spq_prod_idx++;
-	}
-
-	/* Make sure that BD data is updated before writing the producer */
-	wmb();
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
-	       bp->spq_prod_idx);
-
-	mmiowb();
-
+	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
 	return 0;
 }
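
bnx2x_sp_prod_update() issues wmb() before writing the producer index, so the chip can never observe an advanced producer ahead of the descriptor it points to. A compact user-space analogue of that fill-then-publish ordering using a C11 release store (ring layout and names are invented; in the driver the spq_lock still serializes producers, the barrier only orders the writes):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct entry {
	uint32_t data_hi;
	uint32_t data_lo;
};

static struct entry ring[RING_SIZE];
static _Atomic unsigned int prod_idx;

/* Fill the slot first, then publish: the release store keeps the descriptor
 * writes ordered before the producer update, like wmb() before the producer
 * register write above. */
static void post_entry(uint32_t hi, uint32_t lo)
{
	unsigned int prod = atomic_load_explicit(&prod_idx, memory_order_relaxed);
	struct entry *e = &ring[prod % RING_SIZE];

	e->data_hi = hi;
	e->data_lo = lo;
	atomic_store_explicit(&prod_idx, prod + 1, memory_order_release);
}

int main(void)
{
	post_entry(0xdeadbeefu, 0x12345678u);
	printf("prod_idx=%u data_hi=0x%08x\n",
	       (unsigned)atomic_load(&prod_idx), (unsigned)ring[0].data_hi);
	return 0;
}
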
@@ -3024,6 +3049,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 			int func = BP_FUNC(bp);
 
 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bp->mf_config = SHMEM_RD(bp,
+					   mf_cfg.func_mf_config[func].config);
 			val = SHMEM_RD(bp, func_mb[func].drv_status);
 			if (val & DRV_STATUS_DCC_EVENT_MASK)
 				bnx2x_dcc_event(bp,
@@ -3227,6 +3254,17 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 		return IRQ_HANDLED;
 #endif
 
+#ifdef BCM_CNIC
+	{
+		struct cnic_ops *c_ops;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+	}
+#endif
 	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
 	return IRQ_HANDLED;
@@ -3958,7 +3996,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 	estats->no_buff_discard_hi = 0;
 	estats->no_buff_discard_lo = 0;
 
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		int cl_id = fp->cl_id;
 		struct tstorm_per_client_stats *tclient =
@@ -4175,7 +4213,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
 	nstats->rx_dropped = estats->mac_discard;
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		nstats->rx_dropped +=
 			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
 
@@ -4229,7 +4267,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
 	estats->rx_err_discard_pkt = 0;
 	estats->rx_skb_alloc_failed = 0;
 	estats->hw_csum_err = 0;
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
 
 		estats->driver_xoff += qstats->driver_xoff;
@@ -4260,7 +4298,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	if (bp->msglevel & NETIF_MSG_TIMER) {
 		struct bnx2x_fastpath *fp0_rx = bp->fp;
-		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
+		struct bnx2x_fastpath *fp0_tx = bp->fp;
 		struct tstorm_per_client_stats *old_tclient =
 							&bp->fp->old_tclient;
 		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4640,8 +4678,7 @@ static void bnx2x_timer(unsigned long data)
 		}
 	}
 
-	if ((bp->state == BNX2X_STATE_OPEN) ||
-	    (bp->state == BNX2X_STATE_DISABLED))
+	if (bp->state == BNX2X_STATE_OPEN)
 		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 
 timer_restart:
@@ -4860,21 +4897,21 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
 		REG_WR8(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
 						      U_SB_ETH_RX_CQ_INDEX),
-			bp->rx_ticks/12);
+			bp->rx_ticks/(4 * BNX2X_BTR));
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
 			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
 						       U_SB_ETH_RX_CQ_INDEX),
-			 (bp->rx_ticks/12) ? 0 : 1);
+			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
 
 		/* HC_INDEX_C_ETH_TX_CQ_CONS */
 		REG_WR8(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
 						      C_SB_ETH_TX_CQ_INDEX),
-			bp->tx_ticks/12);
+			bp->tx_ticks/(4 * BNX2X_BTR));
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
 			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
 						       C_SB_ETH_TX_CQ_INDEX),
-			 (bp->tx_ticks/12) ? 0 : 1);
+			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
 	}
 }
 
@@ -4916,7 +4953,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 
-		for_each_rx_queue(bp, j) {
+		for_each_queue(bp, j) {
 			struct bnx2x_fastpath *fp = &bp->fp[j];
 
 			for (i = 0; i < max_agg_queues; i++) {
@@ -4939,16 +4976,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
 		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
 
-		/* Mark queue as Rx */
-		fp->is_rx_queue = 1;
-
 		/* "next page" elements initialization */
 		/* SGE ring */
 		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -5054,7 +5088,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 {
 	int i, j;
 
-	for_each_tx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -5080,10 +5114,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
 		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
 		fp->tx_pkt = 0;
 	}
-
-	/* clean tx statistics */
-	for_each_rx_queue(bp, i)
-		bnx2x_fp(bp, i, tx_pkt) = 0;
 }
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
@@ -5112,7 +5142,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i) {
+	/* Rx */
+	for_each_queue(bp, i) {
 		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		u8 cl_id = fp->cl_id;
@@ -5164,10 +5195,11 @@ static void bnx2x_init_context(struct bnx2x *bp)
 					       ETH_CONNECTION_TYPE);
 	}
 
-	for_each_tx_queue(bp, i) {
+	/* Tx */
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		struct eth_context *context =
-			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
+			bnx2x_sp(bp, context[i].eth);
 
 		context->cstorm_st_context.sb_index_number =
 						C_SB_ETH_TX_CQ_INDEX;
@@ -5195,7 +5227,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % bp->num_rx_queues));
+			bp->fp->cl_id + (i % bp->num_queues));
 }
 
 static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -5235,7 +5267,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 {
 	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
 	int mode = bp->rx_mode;
-	int mask = (1 << BP_L_ID(bp));
+	int mask = bp->rx_mode_cl_mask;
 	int func = BP_FUNC(bp);
 	int port = BP_PORT(bp);
 	int i;
@@ -5348,6 +5380,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	       (*(u32 *)&tstorm_config));
 
 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
+	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
 	bnx2x_set_storm_rx_mode(bp);
 
 	for_each_queue(bp, i) {
@@ -5438,7 +5471,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
 			  SGE_PAGE_SIZE * PAGES_PER_SGE),
 		    (u32)0xffff);
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5473,7 +5506,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		rx_pause.cqe_thr_high = 350;
 		rx_pause.sge_thr_high = 0;
 
-		for_each_rx_queue(bp, i) {
+		for_each_queue(bp, i) {
 			struct bnx2x_fastpath *fp = &bp->fp[i];
 
 			if (!fp->disable_tpa) {
@@ -5504,20 +5537,18 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		bp->link_vars.line_speed = SPEED_10000;
 		bnx2x_init_port_minmax(bp);
 
+		if (!BP_NOMCP(bp))
+			bp->mf_config =
+			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 		bnx2x_calc_vn_weight_sum(bp);
 
 		for (vn = VN_0; vn < E1HVN_MAX; vn++)
 			bnx2x_init_vn_minmax(bp, 2*vn + port);
 
 		/* Enable rate shaping and fairness */
-		bp->cmng.flags.cmng_enables =
+		bp->cmng.flags.cmng_enables |=
 					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-		if (bp->vn_weight_sum)
-			bp->cmng.flags.cmng_enables |=
-					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-		else
-			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-			   "  fairness will be disabled\n");
+
 	} else {
 		/* rate shaping and fairness are disabled */
 		DP(NETIF_MSG_IFUP,
@@ -5565,10 +5596,11 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 		fp->state = BNX2X_FP_STATE_CLOSED;
 		fp->index = i;
 		fp->cl_id = BP_L_ID(bp) + i;
+#ifdef BCM_CNIC
+		fp->sb_id = fp->cl_id + 1;
+#else
 		fp->sb_id = fp->cl_id;
-		/* Suitable Rx and Tx SBs are served by the same client */
-		if (i >= bp->num_rx_queues)
-			fp->cl_id -= bp->num_rx_queues;
+#endif
 		DP(NETIF_MSG_IFUP,
 		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
 		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
@@ -5867,7 +5899,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
 	msleep(50);
 	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
 	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-#ifndef BCM_ISCSI
+#ifndef BCM_CNIC
 	/* set NIC mode */
 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 #endif
@@ -6006,6 +6038,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 static int bnx2x_init_common(struct bnx2x *bp)
 {
 	u32 val, i;
+#ifdef BCM_CNIC
+	u32 wb_write[2];
+#endif
 
 	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
 
@@ -6048,7 +6083,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 #endif
 
 	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
-#ifdef BCM_ISCSI
+#ifdef BCM_CNIC
 	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
 	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
 	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
@@ -6091,11 +6126,26 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
 
 	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
+
+#ifdef BCM_CNIC
+	wb_write[0] = 0;
+	wb_write[1] = 0;
+	for (i = 0; i < 64; i++) {
+		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
+		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
+
+		if (CHIP_IS_E1H(bp)) {
+			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
+			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
+					  wb_write, 2);
+		}
+	}
+#endif
 	/* soft reset pulse */
 	REG_WR(bp, QM_REG_SOFT_RESET, 1);
 	REG_WR(bp, QM_REG_SOFT_RESET, 0);
 
-#ifdef BCM_ISCSI
+#ifdef BCM_CNIC
 	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
 #endif
 
@@ -6109,8 +6159,10 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
 	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
 	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+#ifndef BCM_CNIC
 	/* set NIC mode */
 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
 	if (CHIP_IS_E1H(bp))
 		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
 
@@ -6145,6 +6197,18 @@ static int bnx2x_init_common(struct bnx2x *bp)
 		/* TODO: replace with something meaningful */
 	}
 	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+#ifdef BCM_CNIC
+	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
 	REG_WR(bp, SRC_REG_SOFT_RST, 0);
 
 	if (sizeof(union cdu_context) != 1024)
@@ -6261,38 +6325,14 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
 	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
 	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
-#ifdef BCM_ISCSI
-	/* Port0  1
-	 * Port1  385 */
-	i++;
-	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
-	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
-	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
-	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
-
-	/* Port0  2
-	 * Port1  386 */
-	i++;
-	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
-	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
-	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
-	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
-
-	/* Port0  3
-	 * Port1  387 */
-	i++;
-	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
-	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
-	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
-	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
-#endif
 	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
 
-#ifdef BCM_ISCSI
-	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
-	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
+#ifdef BCM_CNIC
+	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
 
 	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
 #endif
 	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
 
@@ -6350,18 +6390,8 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	msleep(5);
 	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 
-#ifdef BCM_ISCSI
-	/* tell the searcher where the T2 table is */
-	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
-
-	wb_write[0] = U64_LO(bp->t2_mapping);
-	wb_write[1] = U64_HI(bp->t2_mapping);
-	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
-	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
-	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
-	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
-
-	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
 #endif
 	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
 	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
@@ -6470,7 +6500,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
 #define PXP_ONE_ILT(x)		(((x) << 10) | x)
 #define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
 
+#ifdef BCM_CNIC
+#define CNIC_ILT_LINES		127
+#define CNIC_CTX_PER_ILT	16
+#else
 #define CNIC_ILT_LINES		0
+#endif
 
 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 {
@@ -6509,6 +6544,46 @@ static int bnx2x_init_func(struct bnx2x *bp)
 		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
 		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
 
+#ifdef BCM_CNIC
+	i += 1 + CNIC_ILT_LINES;
+	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
+	else {
+		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
+	}
+
+	i++;
+	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
+	else {
+		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
+	}
+
+	i++;
+	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
+	else {
+		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
+	}
+
+	/* tell the searcher where the T2 table is */
+	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
+
+	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
+		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
+
+	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
+		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
+		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
+
+	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
+#endif
 
 	if (CHIP_IS_E1H(bp)) {
 		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
@@ -6593,6 +6668,9 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 	bnx2x_zero_def_sb(bp);
 	for_each_queue(bp, i)
 		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
+#ifdef BCM_CNIC
+	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
+#endif
 
 init_hw_err:
 	bnx2x_gunzip_end(bp);
@@ -6632,7 +6710,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 			       sizeof(struct host_status_block));
 	}
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
 		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -6652,7 +6730,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 	}
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath tx rings: tx_buf tx_desc */
 		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -6668,11 +6746,13 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
-#ifdef BCM_ISCSI
+#ifdef BCM_CNIC
 	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
 	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
 	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
 	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
+	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
+		       sizeof(struct host_status_block));
 #endif
 	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 
@@ -6712,7 +6792,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 				sizeof(struct host_status_block));
 	}
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
@@ -6734,7 +6814,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 	}
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 
 		/* fastpath tx rings: tx_buf tx_desc */
 		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
@@ -6751,32 +6831,26 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
 			sizeof(struct bnx2x_slowpath));
 
-#ifdef BCM_ISCSI
+#ifdef BCM_CNIC
 	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
 
-	/* Initialize T1 */
-	for (i = 0; i < 64*1024; i += 64) {
-		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
-		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
-	}
-
 	/* allocate searcher T2 table
 	   we allocate 1/4 of alloc num for T2
 	  (which is not entered into the ILT) */
 	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
 
-	/* Initialize T2 */
+	/* Initialize T2 (for 1024 connections) */
 	for (i = 0; i < 16*1024; i += 64)
-		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
-
-	/* now fixup the last line in the block to point to the next block */
-	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
+		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
 
-	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
+	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
 	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
 
 	/* QM queues (128*MAX_CONN) */
 	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
+
+	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
+			sizeof(struct host_status_block));
 #endif
 
 	/* Slow path ring */
@@ -6796,7 +6870,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		u16 bd_cons = fp->tx_bd_cons;
@@ -6814,7 +6888,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
 	int i, j;
 
-	for_each_rx_queue(bp, j) {
+	for_each_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		for (i = 0; i < NUM_RX_BD; i++) {
@@ -6852,6 +6926,9 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
 	   bp->msix_table[0].vector);
 
+#ifdef BCM_CNIC
+	offset++;
+#endif
 	for_each_queue(bp, i) {
 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
 		   "state %x\n", i, bp->msix_table[i + offset].vector,
@@ -6885,6 +6962,12 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 	bp->msix_table[0].entry = igu_vec;
 	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
 
+#ifdef BCM_CNIC
+	igu_vec = BP_L_ID(bp) + offset;
+	bp->msix_table[1].entry = igu_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
+	offset++;
+#endif
 	for_each_queue(bp, i) {
 		igu_vec = BP_L_ID(bp) + offset + i;
 		bp->msix_table[i + offset].entry = igu_vec;
@@ -6915,14 +6998,13 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 		return -EBUSY;
 	}
 
+#ifdef BCM_CNIC
+	offset++;
+#endif
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		if (i < bp->num_rx_queues)
-			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
-		else
-			sprintf(fp->name, "%s-tx-%d",
-				bp->dev->name, i - bp->num_rx_queues);
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 bp->dev->name, i);
 
 		rc = request_irq(bp->msix_table[i + offset].vector,
 				 bnx2x_msix_fp_int, 0, fp->name, fp);
@@ -6981,7 +7063,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -6989,7 +7071,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -7015,14 +7097,25 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 	bnx2x_int_disable_sync(bp, disable_hw);
 	bnx2x_napi_disable(bp);
 	netif_tx_disable(bp->dev);
-	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
 }
 
 /*
  * Init service functions
  */
 
-static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
+/**
+ * Sets a MAC in a CAM for a few L2 Clients for E1 chip
+ *
+ * @param bp driver descriptor
+ * @param set set or clear an entry (1 or 0)
+ * @param mac pointer to a buffer containing a MAC
+ * @param cl_bit_vec bit vector of clients to register a MAC for
+ * @param cam_offset offset in a CAM to use
+ * @param with_bcast set broadcast MAC as well
+ */
+static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
+				      u32 cl_bit_vec, u8 cam_offset,
+				      u8 with_bcast)
 {
 	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
 	int port = BP_PORT(bp);
@@ -7031,25 +7124,25 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
 	 * unicasts 0-31:port0 32-63:port1
 	 * multicast 64-127:port0 128-191:port1
 	 */
-	config->hdr.length = 2;
-	config->hdr.offset = port ? 32 : 0;
-	config->hdr.client_id = bp->fp->cl_id;
+	config->hdr.length = 1 + (with_bcast ? 1 : 0);
+	config->hdr.offset = cam_offset;
+	config->hdr.client_id = 0xff;
 	config->hdr.reserved1 = 0;
 
 	/* primary MAC */
 	config->config_table[0].cam_entry.msb_mac_addr =
-					swab16(*(u16 *)&bp->dev->dev_addr[0]);
+					swab16(*(u16 *)&mac[0]);
 	config->config_table[0].cam_entry.middle_mac_addr =
-					swab16(*(u16 *)&bp->dev->dev_addr[2]);
+					swab16(*(u16 *)&mac[2]);
 	config->config_table[0].cam_entry.lsb_mac_addr =
-					swab16(*(u16 *)&bp->dev->dev_addr[4]);
+					swab16(*(u16 *)&mac[4]);
 	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
 	if (set)
 		config->config_table[0].target_table_entry.flags = 0;
 	else
 		CAM_INVALIDATE(config->config_table[0]);
 	config->config_table[0].target_table_entry.clients_bit_vector =
-						cpu_to_le32(1 << BP_L_ID(bp));
+						cpu_to_le32(cl_bit_vec);
 	config->config_table[0].target_table_entry.vlan_id = 0;
 
 	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
@@ -7059,47 +7152,58 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
 	   config->config_table[0].cam_entry.lsb_mac_addr);
 
 	/* broadcast */
-	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
-	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
-	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
-	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
-	if (set)
-		config->config_table[1].target_table_entry.flags =
-				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
-	else
-		CAM_INVALIDATE(config->config_table[1]);
-	config->config_table[1].target_table_entry.clients_bit_vector =
-						cpu_to_le32(1 << BP_L_ID(bp));
-	config->config_table[1].target_table_entry.vlan_id = 0;
+	if (with_bcast) {
+		config->config_table[1].cam_entry.msb_mac_addr =
+			cpu_to_le16(0xffff);
+		config->config_table[1].cam_entry.middle_mac_addr =
+			cpu_to_le16(0xffff);
+		config->config_table[1].cam_entry.lsb_mac_addr =
+			cpu_to_le16(0xffff);
+		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
+		if (set)
+			config->config_table[1].target_table_entry.flags =
+					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+		else
+			CAM_INVALIDATE(config->config_table[1]);
+		config->config_table[1].target_table_entry.clients_bit_vector =
+							cpu_to_le32(cl_bit_vec);
+		config->config_table[1].target_table_entry.vlan_id = 0;
+	}
 
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
 }
 
-static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
+/**
+ * Sets a MAC in a CAM for a few L2 Clients for E1H chip
+ *
+ * @param bp driver descriptor
+ * @param set set or clear an entry (1 or 0)
+ * @param mac pointer to a buffer containing a MAC
+ * @param cl_bit_vec bit vector of clients to register a MAC for
+ * @param cam_offset offset in a CAM to use
+ */
+static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
+				       u32 cl_bit_vec, u8 cam_offset)
 {
 	struct mac_configuration_cmd_e1h *config =
 		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
 
-	/* CAM allocation for E1H
-	 * unicasts: by func number
-	 * multicast: 20+FUNC*20, 20 each
-	 */
 	config->hdr.length = 1;
-	config->hdr.offset = BP_FUNC(bp);
-	config->hdr.client_id = bp->fp->cl_id;
+	config->hdr.offset = cam_offset;
+	config->hdr.client_id = 0xff;
 	config->hdr.reserved1 = 0;
 
 	/* primary MAC */
 	config->config_table[0].msb_mac_addr =
-					swab16(*(u16 *)&bp->dev->dev_addr[0]);
+					swab16(*(u16 *)&mac[0]);
 	config->config_table[0].middle_mac_addr =
-					swab16(*(u16 *)&bp->dev->dev_addr[2]);
+					swab16(*(u16 *)&mac[2]);
 	config->config_table[0].lsb_mac_addr =
-					swab16(*(u16 *)&bp->dev->dev_addr[4]);
+					swab16(*(u16 *)&mac[4]);
 	config->config_table[0].clients_bit_vector =
-					cpu_to_le32(1 << BP_L_ID(bp));
+					cpu_to_le32(cl_bit_vec);
 	config->config_table[0].vlan_id = 0;
 	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
 	if (set)
@@ -7108,11 +7212,11 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
 		config->config_table[0].flags =
 				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
 
-	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
+	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
 	   (set ? "setting" : "clearing"),
 	   config->config_table[0].msb_mac_addr,
 	   config->config_table[0].middle_mac_addr,
-	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
+	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
 
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -7164,6 +7268,69 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
 	return -EBUSY;
 }
 
+static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
+{
+	bp->set_mac_pending++;
+	smp_wmb();
+
+	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
+				   (1 << bp->fp->cl_id), BP_FUNC(bp));
+
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+}
+
+static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
+{
+	bp->set_mac_pending++;
+	smp_wmb();
+
+	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
+				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
+				  1);
+
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+}
+
+#ifdef BCM_CNIC
+/**
+ * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). This function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if ramrod doesn't return.
+ */
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+{
+	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
+
+	bp->set_mac_pending++;
+	smp_wmb();
+
+	/* Send a SET_MAC ramrod */
+	if (CHIP_IS_E1(bp))
+		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
+				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
+				  1);
+	else
+		/* CAM allocation for E1H
+		/* CAM allocation for E1H
+		 * unicasts: by func number
+		 * multicast: 20+FUNC*20, 20 each
+		 */
+				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
+
+	/* Wait for a completion when setting */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+
+	return 0;
+}
+#endif
+
 static int bnx2x_setup_leading(struct bnx2x *bp)
 {
 	int rc;
@@ -7199,96 +7366,67 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
 
 static int bnx2x_poll(struct napi_struct *napi, int budget);
 
-static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
-				    int *num_tx_queues_out)
+static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
 {
-	int _num_rx_queues = 0, _num_tx_queues = 0;
 
 	switch (bp->multi_mode) {
 	case ETH_RSS_MODE_DISABLED:
-		_num_rx_queues = 1;
-		_num_tx_queues = 1;
+		bp->num_queues = 1;
 		break;
 
 	case ETH_RSS_MODE_REGULAR:
-		if (num_rx_queues)
-			_num_rx_queues = min_t(u32, num_rx_queues,
-					       BNX2X_MAX_QUEUES(bp));
+		if (num_queues)
+			bp->num_queues = min_t(u32, num_queues,
+						  BNX2X_MAX_QUEUES(bp));
 		else
-			_num_rx_queues = min_t(u32, num_online_cpus(),
-					       BNX2X_MAX_QUEUES(bp));
-
-		if (num_tx_queues)
-			_num_tx_queues = min_t(u32, num_tx_queues,
-					       BNX2X_MAX_QUEUES(bp));
-		else
-			_num_tx_queues = min_t(u32, num_online_cpus(),
-					       BNX2X_MAX_QUEUES(bp));
-
-		/* There must be not more Tx queues than Rx queues */
-		if (_num_tx_queues > _num_rx_queues) {
-			BNX2X_ERR("number of tx queues (%d) > "
-				  "number of rx queues (%d)"
-				  "  defaulting to %d\n",
-				  _num_tx_queues, _num_rx_queues,
-				  _num_rx_queues);
-			_num_tx_queues = _num_rx_queues;
-		}
+			bp->num_queues = min_t(u32, num_online_cpus(),
+						  BNX2X_MAX_QUEUES(bp));
 		break;
 
 
 	default:
-		_num_rx_queues = 1;
-		_num_tx_queues = 1;
+		bp->num_queues = 1;
 		break;
 	}
-
-	*num_rx_queues_out = _num_rx_queues;
-	*num_tx_queues_out = _num_tx_queues;
 }
 
-static int bnx2x_set_int_mode(struct bnx2x *bp)
+static int bnx2x_set_num_queues(struct bnx2x *bp)
 {
 	int rc = 0;
 
 	switch (int_mode) {
 	case INT_MODE_INTx:
 	case INT_MODE_MSI:
-		bp->num_rx_queues = 1;
-		bp->num_tx_queues = 1;
+		bp->num_queues = 1;
 		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
 		break;
 
 	case INT_MODE_MSIX:
 	default:
-		/* Set interrupt mode according to bp->multi_mode value */
-		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
-					&bp->num_tx_queues);
+		/* Set number of queues according to bp->multi_mode value */
+		bnx2x_set_num_queues_msix(bp);
 
-		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
-		   bp->num_rx_queues, bp->num_tx_queues);
+		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+		   bp->num_queues);
 
 		/* if we can't use MSI-X we only need one fp,
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
 		 */
 		rc = bnx2x_enable_msix(bp);
-		if (rc) {
+		if (rc)
 			/* failed to enable MSI-X */
-			if (bp->multi_mode)
-				BNX2X_ERR("Multi requested but failed to "
-					  "enable MSI-X (rx %d tx %d), "
-					  "set number of queues to 1\n",
-					  bp->num_rx_queues, bp->num_tx_queues);
-			bp->num_rx_queues = 1;
-			bp->num_tx_queues = 1;
-		}
+			bp->num_queues = 1;
 		break;
 	}
-	bp->dev->real_num_tx_queues = bp->num_tx_queues;
+	bp->dev->real_num_tx_queues = bp->num_queues;
 	return rc;
 }
 
+#ifdef BCM_CNIC
+static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+#endif
 
 /* must be called with rtnl_lock */
 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
@@ -7303,16 +7441,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
-	rc = bnx2x_set_int_mode(bp);
+	rc = bnx2x_set_num_queues(bp);
 
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_fp(bp, i, disable_tpa) =
 					((bp->flags & TPA_ENABLE_FLAG) == 0);
 
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, 128);
 
@@ -7326,7 +7464,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		}
 	} else {
 		/* Fall to INTx if failed to enable MSI-X due to lack of
-		   memory (in bnx2x_set_int_mode()) */
+		   memory (in bnx2x_set_num_queues()) */
 		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
 			bnx2x_enable_msi(bp);
 		bnx2x_ack_int(bp);
@@ -7427,20 +7565,37 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (CHIP_IS_E1H(bp))
 		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
 			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
-			bp->state = BNX2X_STATE_DISABLED;
+			bp->flags |= MF_FUNC_DIS;
 		}
 
 	if (bp->state == BNX2X_STATE_OPEN) {
+#ifdef BCM_CNIC
+		/* Enable Timer scan */
+		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+#endif
 		for_each_nondefault_queue(bp, i) {
 			rc = bnx2x_setup_multi(bp, i);
 			if (rc)
+#ifdef BCM_CNIC
+				goto load_error4;
+#else
 				goto load_error3;
+#endif
 		}
 
 		if (CHIP_IS_E1(bp))
-			bnx2x_set_mac_addr_e1(bp, 1);
+			bnx2x_set_eth_mac_addr_e1(bp, 1);
 		else
-			bnx2x_set_mac_addr_e1h(bp, 1);
+			bnx2x_set_eth_mac_addr_e1h(bp, 1);
+#ifdef BCM_CNIC
+		/* Set iSCSI L2 MAC */
+		mutex_lock(&bp->cnic_mutex);
+		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
+			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+		}
+		mutex_unlock(&bp->cnic_mutex);
+#endif
 	}
 
 	if (bp->port.pmf)
@@ -7481,9 +7636,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* start the timer */
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
+#ifdef BCM_CNIC
+	bnx2x_setup_cnic_irq_info(bp);
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+#endif
 
 	return 0;
 
+#ifdef BCM_CNIC
+load_error4:
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
+#endif
 load_error3:
 	bnx2x_int_disable_sync(bp, 1);
 	if (!BP_NOMCP(bp)) {
@@ -7493,14 +7658,14 @@ load_error3:
 	bp->port.pmf = 0;
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error2:
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 load_error1:
 	bnx2x_napi_disable(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 
@@ -7591,6 +7756,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 
+#ifdef BCM_CNIC
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+	/*
+	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
+	 * complete
+	 */
+	for (i = 0; i < 200; i++) {
+		msleep(10);
+		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+			break;
+	}
+#endif
 	/* Clear ILT */
 	base = FUNC_ILT_BASE(func);
 	for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7657,6 +7835,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	u32 reset_code = 0;
 	int i, cnt, rc;
 
+#ifdef BCM_CNIC
+	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 
 	/* Set "drop all" */
@@ -7675,7 +7856,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bnx2x_free_irq(bp);
 
 	/* Wait until tx fastpath tasks complete */
-	for_each_tx_queue(bp, i) {
+	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		cnt = 1000;
@@ -7703,7 +7884,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
 
-		bnx2x_set_mac_addr_e1(bp, 0);
+		bnx2x_set_eth_mac_addr_e1(bp, 0);
 
 		for (i = 0; i < config->hdr.length; i++)
 			CAM_INVALIDATE(config->config_table[i]);
@@ -7716,6 +7897,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		config->hdr.client_id = bp->fp->cl_id;
 		config->hdr.reserved1 = 0;
 
+		bp->set_mac_pending++;
+		smp_wmb();
+
 		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
 			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
@@ -7723,13 +7907,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	} else { /* E1H */
 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
-		bnx2x_set_mac_addr_e1h(bp, 0);
+		bnx2x_set_eth_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 
 		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
 	}
+#ifdef BCM_CNIC
+	/* Clear iSCSI L2 MAC */
+	mutex_lock(&bp->cnic_mutex);
+	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
+		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
+	}
+	mutex_unlock(&bp->cnic_mutex);
+#endif
 
 	if (unload_mode == UNLOAD_NORMAL)
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -7806,9 +7999,9 @@ unload_error:
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 
@@ -8506,6 +8699,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
 		       bp->link_params.req_flow_ctrl, bp->port.advertising);
 }
 
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+	mac_hi = cpu_to_be16(mac_hi);
+	mac_lo = cpu_to_be32(mac_lo);
+	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
+
 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
@@ -8587,14 +8788,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 
 	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
-	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
-	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
-	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
-	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
-	bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
-	bp->dev->dev_addr[5] = (u8)(val & 0xff);
+	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+
+#ifdef BCM_CNIC
+	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
+	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
+	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+#endif
 }
 
 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -8690,6 +8892,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
 
 	mutex_init(&bp->port.phy_mutex);
+	mutex_init(&bp->fw_mb_mutex);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_mutex);
+#endif
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
@@ -8738,8 +8944,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
 	bp->rx_csum = 1;
 
-	bp->tx_ticks = 50;
-	bp->rx_ticks = 25;
+	/* make sure that the numbers are in the right granularity */
+	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
+	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
 
 	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
 	bp->current_interval = (poll ? poll : timer_interval);
@@ -8765,20 +8972,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	cmd->supported = bp->port.supported;
 	cmd->advertising = bp->port.advertising;
 
-	if (netif_carrier_ok(dev)) {
+	if ((bp->state == BNX2X_STATE_OPEN) &&
+	    !(bp->flags & MF_FUNC_DIS) &&
+	    (bp->link_vars.link_up)) {
 		cmd->speed = bp->link_vars.line_speed;
 		cmd->duplex = bp->link_vars.duplex;
-	} else {
-		cmd->speed = bp->link_params.req_line_speed;
-		cmd->duplex = bp->link_params.req_duplex;
-	}
-	if (IS_E1HMF(bp)) {
-		u16 vn_max_rate;
+		if (IS_E1HMF(bp)) {
+			u16 vn_max_rate;
 
-		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
+			vn_max_rate =
+				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
 				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-		if (vn_max_rate < cmd->speed)
-			cmd->speed = vn_max_rate;
+			if (vn_max_rate < cmd->speed)
+				cmd->speed = vn_max_rate;
+		}
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
 	}
 
 	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
@@ -9163,6 +9373,9 @@ static u32 bnx2x_get_link(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
+	if (bp->flags & MF_FUNC_DIS)
+		return 0;
+
 	return bp->link_vars.link_up;
 }
 
@@ -9567,8 +9780,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
 
 	} else if (eeprom->magic == 0x50485952) {
 		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
-		if ((bp->state == BNX2X_STATE_OPEN) ||
-		    (bp->state == BNX2X_STATE_DISABLED)) {
+		if (bp->state == BNX2X_STATE_OPEN) {
 			bnx2x_acquire_phy_lock(bp);
 			rc |= bnx2x_link_reset(&bp->link_params,
 					       &bp->link_vars, 1);
@@ -9818,11 +10030,6 @@ static const struct {
 	{ "idle check (online)" }
 };
 
-static int bnx2x_self_test_count(struct net_device *dev)
-{
-	return BNX2X_NUM_TESTS;
-}
-
 static int bnx2x_test_registers(struct bnx2x *bp)
 {
 	int idx, i, rc = -ENODEV;
@@ -9990,7 +10197,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	struct sk_buff *skb;
 	unsigned char *packet;
 	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
-	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
+	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
 	u16 tx_start_idx, tx_idx;
 	u16 rx_start_idx, rx_idx;
 	u16 pkt_prod, bd_prod;
@@ -10067,13 +10274,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	fp_tx->tx_db.data.prod += 2;
 	barrier();
-	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
+	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
 
 	mmiowb();
 
 	num_pkts++;
 	fp_tx->tx_bd_prod += 2; /* start + pbd */
-	bp->dev->trans_start = jiffies;
 
 	udelay(100);
 
@@ -10223,14 +10429,16 @@ static int bnx2x_test_intr(struct bnx2x *bp)
 	config->hdr.client_id = bp->fp->cl_id;
 	config->hdr.reserved1 = 0;
 
+	bp->set_mac_pending++;
+	smp_wmb();
 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
 			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
 	if (rc == 0) {
-		bp->set_mac_pending++;
 		for (i = 0; i < 10; i++) {
 			if (!bp->set_mac_pending)
 				break;
+			smp_rmb();
 			msleep_interruptible(10);
 		}
 		if (i == 10)
@@ -10264,7 +10472,7 @@ static void bnx2x_self_test(struct net_device *dev,
 		/* disable input for TX port IF */
 		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
 
-		link_up = bp->link_vars.link_up;
+		link_up = (bnx2x_link_test(bp) == 0);
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 		bnx2x_nic_load(bp, LOAD_DIAG);
 		/* wait until link state is restored */
@@ -10436,6 +10644,36 @@ static const struct {
 #define IS_E1HMF_MODE_STAT(bp) \
 			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
 
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, num_stats;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		if (is_multi(bp)) {
+			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
+			if (!IS_E1HMF_MODE_STAT(bp))
+				num_stats += BNX2X_NUM_STATS;
+		} else {
+			if (IS_E1HMF_MODE_STAT(bp)) {
+				num_stats = 0;
+				for (i = 0; i < BNX2X_NUM_STATS; i++)
+					if (IS_FUNC_STAT(i))
+						num_stats++;
+			} else
+				num_stats = BNX2X_NUM_STATS;
+		}
+		return num_stats;
+
+	case ETH_SS_TEST:
+		return BNX2X_NUM_TESTS;
+
+	default:
+		return -EINVAL;
+	}
+}
+
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -10445,7 +10683,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
 			k = 0;
-			for_each_rx_queue(bp, i) {
+			for_each_queue(bp, i) {
 				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
 					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
 						bnx2x_q_stats_arr[j].string, i);
@@ -10473,28 +10711,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	}
 }
 
-static int bnx2x_get_stats_count(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i, num_stats;
-
-	if (is_multi(bp)) {
-		num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
-		if (!IS_E1HMF_MODE_STAT(bp))
-			num_stats += BNX2X_NUM_STATS;
-	} else {
-		if (IS_E1HMF_MODE_STAT(bp)) {
-			num_stats = 0;
-			for (i = 0; i < BNX2X_NUM_STATS; i++)
-				if (IS_FUNC_STAT(i))
-					num_stats++;
-		} else
-			num_stats = BNX2X_NUM_STATS;
-	}
-
-	return num_stats;
-}
-
 static void bnx2x_get_ethtool_stats(struct net_device *dev,
 				    struct ethtool_stats *stats, u64 *buf)
 {
@@ -10504,7 +10720,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
 	if (is_multi(bp)) {
 		k = 0;
-		for_each_rx_queue(bp, i) {
+		for_each_queue(bp, i) {
 			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 				if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10570,7 +10786,6 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 static int bnx2x_phys_id(struct net_device *dev, u32 data)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int port = BP_PORT(bp);
 	int i;
 
 	if (!netif_running(dev))
@@ -10584,13 +10799,10 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
 
 	for (i = 0; i < (data * 2); i++) {
 		if ((i % 2) == 0)
-			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
-				      bp->link_params.hw_led_mode,
-				      bp->link_params.chip_id);
+			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+				      SPEED_1000);
 		else
-			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
-				      bp->link_params.hw_led_mode,
-				      bp->link_params.chip_id);
+			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
 
 		msleep_interruptible(500);
 		if (signal_pending(current))
@@ -10598,10 +10810,8 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
 	}
 
 	if (bp->link_vars.link_up)
-		bnx2x_set_led(bp, port, LED_MODE_OPER,
-			      bp->link_vars.line_speed,
-			      bp->link_params.hw_led_mode,
-			      bp->link_params.chip_id);
+		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+			      bp->link_vars.line_speed);
 
 	return 0;
 }
@@ -10637,11 +10847,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.set_sg			= ethtool_op_set_sg,
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= bnx2x_set_tso,
-	.self_test_count	= bnx2x_self_test_count,
 	.self_test		= bnx2x_self_test,
+	.get_sset_count		= bnx2x_get_sset_count,
 	.get_strings		= bnx2x_get_strings,
 	.phys_id		= bnx2x_phys_id,
-	.get_stats_count	= bnx2x_get_stats_count,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
 };
 
@@ -10707,54 +10916,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
 
 static int bnx2x_poll(struct napi_struct *napi, int budget)
 {
+	int work_done = 0;
 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
 						 napi);
 	struct bnx2x *bp = fp->bp;
-	int work_done = 0;
 
+	while (1) {
 #ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		goto poll_panic;
+		if (unlikely(bp->panic)) {
+			napi_complete(napi);
+			return 0;
+		}
 #endif
 
-	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
-	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
-
-	bnx2x_update_fpsb_idx(fp);
-
-	if (bnx2x_has_rx_work(fp)) {
-		work_done = bnx2x_rx_int(fp, budget);
+		if (bnx2x_has_tx_work(fp))
+			bnx2x_tx_int(fp);
 
-		/* must not complete if we consumed full budget */
-		if (work_done >= budget)
-			goto poll_again;
-	}
+		if (bnx2x_has_rx_work(fp)) {
+			work_done += bnx2x_rx_int(fp, budget - work_done);
 
-	/* bnx2x_has_rx_work() reads the status block, thus we need to
-	 * ensure that status block indices have been actually read
-	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
-	 * so that we won't write the "newer" value of the status block to IGU
-	 * (if there was a DMA right after bnx2x_has_rx_work and
-	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
-	 * may be postponed to right before bnx2x_ack_sb). In this case
-	 * there will never be another interrupt until there is another update
-	 * of the status block, while there is still unhandled work.
-	 */
-	rmb();
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget)
+				break;
+		}
 
-	if (!bnx2x_has_rx_work(fp)) {
-#ifdef BNX2X_STOP_ON_ERROR
-poll_panic:
-#endif
-		napi_complete(napi);
+		/* Fall out from the NAPI loop if needed */
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+			bnx2x_update_fpsb_idx(fp);
+		/* bnx2x_has_rx_work() reads the status block, thus we need
+		 * to ensure that status block indices have been actually read
+		 * (bnx2x_update_fpsb_idx) prior to this check
+		 * (bnx2x_has_rx_work) so that we won't write the "newer"
+		 * value of the status block to IGU (if there was a DMA right
+		 * after bnx2x_has_rx_work and if there is no rmb, the memory
+		 * reading (bnx2x_update_fpsb_idx) may be postponed to right
+		 * before bnx2x_ack_sb). In this case there will never be
+		 * another interrupt until there is another update of the
+		 * status block, while there is still unhandled work.
+		 */
+			rmb();
 
-		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
-		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
+			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Re-enable interrupts */
+				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+					     le16_to_cpu(fp->fp_c_idx),
+					     IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_u_idx),
+					     IGU_INT_ENABLE, 1);
+				break;
+			}
+		}
 	}
 
-poll_again:
 	return work_done;
 }
 
@@ -10843,10 +11058,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 	}
 
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		rc |= XMIT_GSO_V4;
+		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
 
 	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		rc |= XMIT_GSO_V6;
+		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
 
 	return rc;
 }
@@ -10939,7 +11154,7 @@ exit_lbl:
 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	struct bnx2x_fastpath *fp, *fp_stat;
+	struct bnx2x_fastpath *fp;
 	struct netdev_queue *txq;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
@@ -10961,11 +11176,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = skb_get_queue_mapping(skb);
 	txq = netdev_get_tx_queue(dev, fp_index);
 
-	fp = &bp->fp[fp_index + bp->num_rx_queues];
-	fp_stat = &bp->fp[fp_index];
+	fp = &bp->fp[fp_index];
 
 	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
-		fp_stat->eth_q_stats.driver_xoff++;
+		fp->eth_q_stats.driver_xoff++;
 		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
@@ -11191,7 +11405,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	fp->tx_db.data.prod += nbd;
 	barrier();
-	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
+	DOORBELL(bp, fp->index, fp->tx_db.raw);
 
 	mmiowb();
 
@@ -11202,11 +11416,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
 		   if we put Tx into XOFF state. */
 		smp_mb();
-		fp_stat->eth_q_stats.driver_xoff++;
+		fp->eth_q_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 			netif_tx_wake_queue(txq);
 	}
-	fp_stat->tx_pkt++;
+	fp->tx_pkt++;
 
 	return NETDEV_TX_OK;
 }
@@ -11321,6 +11535,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 			config->hdr.client_id = bp->fp->cl_id;
 			config->hdr.reserved1 = 0;
 
+			bp->set_mac_pending++;
+			smp_wmb();
+
 			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
 				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
@@ -11370,9 +11587,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev)) {
 		if (CHIP_IS_E1(bp))
-			bnx2x_set_mac_addr_e1(bp, 1);
+			bnx2x_set_eth_mac_addr_e1(bp, 1);
 		else
-			bnx2x_set_mac_addr_e1h(bp, 1);
+			bnx2x_set_eth_mac_addr_e1h(bp, 1);
 	}
 
 	return 0;
@@ -11830,21 +12047,14 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 
 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
 {
-	char fw_file_name[40] = {0};
+	const char *fw_file_name;
 	struct bnx2x_fw_file_hdr *fw_hdr;
-	int rc, offset;
+	int rc;
 
-	/* Create a FW file name */
 	if (CHIP_IS_E1(bp))
-		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
+		fw_file_name = FW_FILE_NAME_E1;
 	else
-		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
-
-	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
-		BCM_5710_FW_MAJOR_VERSION,
-		BCM_5710_FW_MINOR_VERSION,
-		BCM_5710_FW_REVISION_VERSION,
-		BCM_5710_FW_ENGINEERING_VERSION);
+		fw_file_name = FW_FILE_NAME_E1H;
 
 	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
 
@@ -12098,9 +12308,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_rx_queue(bp, i)
+	for_each_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 	bnx2x_free_mem(bp);
 
@@ -12276,4 +12486,287 @@ static void __exit bnx2x_cleanup(void)
 module_init(bnx2x_init);
 module_exit(bnx2x_cleanup);
 
+#ifdef BCM_CNIC
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+	struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+	bp->cnic_spq_pending -= count;
+
+	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
+	     bp->cnic_spq_pending++) {
+
+		if (!bp->cnic_kwq_pending)
+			break;
+
+		spe = bnx2x_sp_get_next(bp);
+		*spe = *bp->cnic_kwq_cons;
+
+		bp->cnic_kwq_pending--;
+
+		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+			bp->cnic_kwq_cons = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_cons++;
+	}
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+			       struct kwqe_16 *kwqes[], u32 count)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EIO;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	for (i = 0; i < count; i++) {
+		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+			break;
+
+		*bp->cnic_kwq_prod = *spe;
+
+		bp->cnic_kwq_pending++;
+
+		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
+		   spe->data.mac_config_addr.hi,
+		   spe->data.mac_config_addr.lo,
+		   bp->cnic_kwq_pending);
+
+		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+			bp->cnic_kwq_prod = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_prod++;
+	}
+
+	spin_unlock_bh(&bp->spq_lock);
+
+	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+		bnx2x_cnic_sp_post(bp, 0);
+
+	return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	mutex_lock(&bp->cnic_mutex);
+	c_ops = bp->cnic_ops;
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	mutex_unlock(&bp->cnic_mutex);
+
+	return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	ctl.cmd = cmd;
+
+	return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
+{
+	struct cnic_ctl_info ctl;
+
+	/* first we tell CNIC and only then we count this as a completion */
+	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+	ctl.data.comp.cid = cid;
+
+	bnx2x_cnic_ctl_send_bh(bp, &ctl);
+	bnx2x_cnic_sp_post(bp, 1);
+}
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (ctl->cmd) {
+	case DRV_CTL_CTXTBL_WR_CMD: {
+		u32 index = ctl->data.io.offset;
+		dma_addr_t addr = ctl->data.io.dma_addr;
+
+		bnx2x_ilt_wr(bp, index, addr);
+		break;
+	}
+
+	case DRV_CTL_COMPLETION_CMD: {
+		int count = ctl->data.comp.comp_count;
+
+		bnx2x_cnic_sp_post(bp, count);
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_START_L2_CMD: {
+		u32 cli = ctl->data.ring.client_id;
+
+		bp->rx_mode_cl_mask |= (1 << cli);
+		bnx2x_set_storm_rx_mode(bp);
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_STOP_L2_CMD: {
+		u32 cli = ctl->data.ring.client_id;
+
+		bp->rx_mode_cl_mask &= ~(1 << cli);
+		bnx2x_set_storm_rx_mode(bp);
+		break;
+	}
+
+	default:
+		BNX2X_ERR("unknown command %x\n", ctl->cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (bp->flags & USING_MSIX_FLAG) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+		cp->irq_arr[0].vector = bp->msix_table[1].vector;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+	cp->irq_arr[0].status_blk = bp->cnic_sb;
+	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
+	cp->irq_arr[1].status_blk = bp->def_status_blk;
+	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+
+	cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			       void *data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		return -EBUSY;
+
+	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bp->cnic_kwq)
+		return -ENOMEM;
+
+	bp->cnic_kwq_cons = bp->cnic_kwq;
+	bp->cnic_kwq_prod = bp->cnic_kwq;
+	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+	bp->cnic_spq_pending = 0;
+	bp->cnic_kwq_pending = 0;
+
+	bp->cnic_data = data;
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
+
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_mutex);
+	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
+		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
+		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+	}
+	cp->drv_state = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_mutex);
+	synchronize_rcu();
+	kfree(bp->cnic_kwq);
+	bp->cnic_kwq = NULL;
+
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = CHIP_ID(bp);
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->io_base2 = bp->doorbells;
+	cp->max_kwqe_pending = 8;
+	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
+	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
+	cp->ctx_tbl_len = CNIC_ILT_LINES;
+	cp->starting_cid = BCM_CNIC_CID_START;
+	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_register_cnic = bnx2x_register_cnic;
+	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
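
Note: bnx2x_register_cnic() above carves one page for the cnic_kwq work queue, points cons/prod at its base with cnic_kwq_last marking the end of the area, and only then publishes the ops with rcu_assign_pointer(); bnx2x_unregister_cnic() reverses that (clear under cnic_mutex, synchronize_rcu(), then kfree). The fragment below is a stand-alone user-space model of just the ring-bounds bookkeeping; the slot count and the wrap convention are the model's own, not taken from the driver headers.

#include <stdio.h>

struct kwqe { int data; };

struct kwq {
	struct kwqe *base, *prod, *cons, *last;	/* last = one past the area */
	int pending;
};

static void kwq_init(struct kwq *q, struct kwqe *mem, int cnt)
{
	q->base = q->prod = q->cons = mem;
	q->last = mem + cnt;
	q->pending = 0;
}

static int kwq_post(struct kwq *q, struct kwqe e)
{
	if (q->pending >= q->last - q->base)
		return -1;			/* ring full */
	*q->prod++ = e;
	if (q->prod == q->last)			/* wrap back to the base */
		q->prod = q->base;
	q->pending++;
	return 0;
}

static int kwq_consume(struct kwq *q, struct kwqe *out)
{
	if (!q->pending)
		return -1;
	*out = *q->cons++;
	if (q->cons == q->last)
		q->cons = q->base;
	q->pending--;
	return 0;
}

int main(void)
{
	struct kwqe mem[8], e, out;	/* 8 slots here; the driver uses MAX_SP_DESC_CNT */
	struct kwq q;
	int i;

	kwq_init(&q, mem, 8);
	for (i = 0; i < 10; i++) {	/* the last two posts fail: ring full */
		e.data = i;
		printf("post %d -> %d\n", i, kwq_post(&q, e));
	}
	while (kwq_consume(&q, &out) == 0)
		printf("consume %d\n", out.data);
	return 0;
}
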
 
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index aa76cbada5e2..b668173ffcb4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -4772,18 +4772,28 @@
 #define PCI_ID_VAL2					0x438
 
 
-#define MDIO_REG_BANK_CL73_IEEEB0			0x0
-#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL		0x0
+#define MDIO_REG_BANK_CL73_IEEEB0	0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL	0x0
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN	0x0200
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN		0x1000
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST	0x8000
 
-#define MDIO_REG_BANK_CL73_IEEEB1			0x10
-#define MDIO_CL73_IEEEB1_AN_ADV2				0x01
+#define MDIO_REG_BANK_CL73_IEEEB1	0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1		0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE			0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2		0x01
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M		0x0000
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX		0x0020
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4		0x0040
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR		0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1		0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE		0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK		0x0C00
 
 #define MDIO_REG_BANK_RX0				0x80b0
 #define MDIO_RX0_RX_STATUS				0x10
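
Note: the new CL73 IEEEB1 definitions add the local and link-partner pause-advertisement registers used for clause-73 flow-control negotiation. A two-line sanity check of the bit layout, copying the values from the hunk above (the check itself is only illustrative):

#include <assert.h>
#include <stdio.h>

#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE		0x0400
#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC	0x0800
#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH	0x0C00
#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK	0x0C00

int main(void)
{
	unsigned int adv = MDIO_CL73_IEEEB1_AN_ADV1_PAUSE |
			   MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC;

	/* PAUSE | ASYMMETRIC composes to PAUSE_BOTH and fits the mask */
	assert(adv == MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH);
	assert((adv & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) == adv);
	printf("pause advertisement bits = 0x%04x\n", adv);
	return 0;
}
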
@@ -4910,6 +4920,8 @@
 
 
 #define MDIO_REG_BANK_10G_PARALLEL_DETECT		0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS		0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK		0x8000
 #define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL		0x11
 #define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN	0x1
 #define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK		0x13
@@ -4934,6 +4946,8 @@
 #define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G			0x0010
 #define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M			0x0008
 #define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M			0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2			0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 		0x0002
 #define MDIO_SERDES_DIGITAL_MISC1				0x18
 #define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK			0xE000
 #define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M			0x0000
@@ -5115,6 +5129,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_PMA_REG_8481_LED1_MASK	0xa82c
 #define MDIO_PMA_REG_8481_LED2_MASK	0xa82f
 #define MDIO_PMA_REG_8481_LED3_MASK	0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK	0xa834
 #define MDIO_PMA_REG_8481_SIGNAL_MASK	0xa835
 #define MDIO_PMA_REG_8481_LINK_SIGNAL	0xa83b
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c3fa31c9f2a7..d69e6838f21e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -446,6 +446,48 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
 /////////////////////////////////////////////////////////////////////////////////
 
 /**
+ * __choose_matched - update a port's matched variable from a received lacpdu
+ * @lacpdu: the lacpdu we've received
+ * @port: the port we're looking at
+ *
+ * Update the value of the matched variable, using parameter values from a
+ * newly received lacpdu. Parameter values for the partner carried in the
+ * received PDU are compared with the corresponding operational parameter
+ * values for the actor. Matched is set to TRUE if all of these parameters
+ * match and the PDU parameter partner_state.aggregation has the same value as
+ * actor_oper_port_state.aggregation and lacp will actively maintain the link
+ * in the aggregation. Matched is also set to TRUE if the value of
+ * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
+ * an individual link and lacp will actively maintain the link. Otherwise,
+ * matched is set to FALSE. LACP is considered to be actively maintaining the
+ * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
+ * the actor's actor_oper_port_state.lacp_activity and the PDU's
+ * partner_state.lacp_activity variables are TRUE.
+ *
+ * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is
+ * used here to implement the language from 802.3ad 43.4.9 that requires
+ * recordPDU to "match" the LACPDU parameters to the stored values.
+ */
+static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
+{
+	// check if all parameters are alike
+	if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
+	     (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
+	     !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+	     (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
+	     (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
+	     ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
+	    // or this is individual link(aggregation == FALSE)
+	    ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
+		) {
+		// update the state machine Matched variable
+		port->sm_vars |= AD_PORT_MATCHED;
+	} else {
+		port->sm_vars &= ~AD_PORT_MATCHED;
+	}
+}
+
+/**
  * __record_pdu - record parameters from a received lacpdu
  * @lacpdu: the lacpdu we've received
  * @port: the port we're looking at
@@ -459,6 +501,7 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
 	if (lacpdu && port) {
 		struct port_params *partner = &port->partner_oper;
 
+		__choose_matched(lacpdu, port);
 		// record the new parameter values for the partner operational
 		partner->port_number = ntohs(lacpdu->actor_port);
 		partner->port_priority = ntohs(lacpdu->actor_port_priority);
@@ -518,12 +561,12 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
 		const struct port_params *partner = &port->partner_oper;
 
 		// check if any parameter is different
-		if (ntohs(lacpdu->actor_port) != partner->port_number
-		    || ntohs(lacpdu->actor_port_priority) != partner->port_priority
-		    || MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system)
-		    || ntohs(lacpdu->actor_system_priority) != partner->system_priority
-		    || ntohs(lacpdu->actor_key) != partner->key
-		    || (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
+		if (ntohs(lacpdu->actor_port) != partner->port_number ||
+		    ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
+		    MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
+		    ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
+		    ntohs(lacpdu->actor_key) != partner->key ||
+		    (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
 			// update the state machine Selected variable
 			port->sm_vars &= ~AD_PORT_SELECTED;
 		}
@@ -549,12 +592,12 @@ static void __update_default_selected(struct port *port)
 		const struct port_params *oper = &port->partner_oper;
 
 		// check if any parameter is different
-		if (admin->port_number != oper->port_number
-		    || admin->port_priority != oper->port_priority
-		    || MAC_ADDRESS_COMPARE(&admin->system, &oper->system)
-		    || admin->system_priority != oper->system_priority
-		    || admin->key != oper->key
-		    || (admin->port_state & AD_STATE_AGGREGATION)
+		if (admin->port_number != oper->port_number ||
+		    admin->port_priority != oper->port_priority ||
+		    MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
+		    admin->system_priority != oper->system_priority ||
+		    admin->key != oper->key ||
+		    (admin->port_state & AD_STATE_AGGREGATION)
 			!= (oper->port_state & AD_STATE_AGGREGATION)) {
 			// update the state machine Selected variable
 			port->sm_vars &= ~AD_PORT_SELECTED;
@@ -563,47 +606,6 @@ static void __update_default_selected(struct port *port)
 }
 
 /**
- * __choose_matched - update a port's matched variable from a received lacpdu
- * @lacpdu: the lacpdu we've received
- * @port: the port we're looking at
- *
- * Update the value of the matched variable, using parameter values from a
- * newly received lacpdu. Parameter values for the partner carried in the
- * received PDU are compared with the corresponding operational parameter
- * values for the actor. Matched is set to TRUE if all of these parameters
- * match and the PDU parameter partner_state.aggregation has the same value as
- * actor_oper_port_state.aggregation and lacp will actively maintain the link
- * in the aggregation. Matched is also set to TRUE if the value of
- * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
- * an individual link and lacp will actively maintain the link. Otherwise,
- * matched is set to FALSE. LACP is considered to be actively maintaining the
- * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
- * the actor's actor_oper_port_state.lacp_activity and the PDU's
- * partner_state.lacp_activity variables are TRUE.
- */
-static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
-{
-	// validate lacpdu and port
-	if (lacpdu && port) {
-		// check if all parameters are alike
-		if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
-		     (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
-		     !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
-		     (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
-		     (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
-		     ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
-		    // or this is individual link(aggregation == FALSE)
-		    ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
-		   ) {
-			// update the state machine Matched variable
-			port->sm_vars |= AD_PORT_MATCHED;
-		} else {
-			port->sm_vars &= ~AD_PORT_MATCHED;
-		}
-	}
-}
-
-/**
  * __update_ntt - update a port's ntt variable from a received lacpdu
  * @lacpdu: the lacpdu we've received
  * @port: the port we're looking at
@@ -1134,7 +1136,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 			__update_selected(lacpdu, port);
 			__update_ntt(lacpdu, port);
 			__record_pdu(lacpdu, port);
-			__choose_matched(lacpdu, port);
 			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
 			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
 			// verify that if the aggregator is enabled, the port is enabled too.
@@ -1956,7 +1957,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	struct port *port, *prev_port, *temp_port;
 	struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
 	int select_new_active_agg = 0;
-	
+
 	// find the aggregator related to this slave
 	aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
@@ -2024,7 +2025,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
 				// clear the aggregator
 				ad_clear_agg(aggregator);
-				
+
 				if (select_new_active_agg) {
 					ad_agg_selection_logic(__get_first_agg(port));
 				}
@@ -2075,7 +2076,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 			}
 		}
 	}
-	port->slave=NULL;	
+	port->slave=NULL;
 }
 
 /**
@@ -2301,7 +2302,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 }
 
 /*
- * set link state for bonding master: if we have an active 
+ * set link state for bonding master: if we have an active
  * aggregator, we're up, if not, we're down.  Presumes that we cannot
  * have an active aggregator if there are no slaves with link up.
  *
@@ -2395,7 +2396,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
-	slave_agg_no = bond->xmit_hash_policy(skb, dev, slaves_in_agg);
+	slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
 
 	bond_for_each_slave(bond, slave, i) {
 		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
@@ -2445,9 +2446,6 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
 	struct slave *slave = NULL;
 	int ret = NET_RX_DROP;
 
-	if (dev_net(dev) != &init_net)
-		goto out;
-
 	if (!(dev->flags & IFF_MASTER))
 		goto out;
 
@@ -2468,4 +2466,3 @@ out:
 
 	return ret;
 }
-
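
Note: bond_3ad.c moves __choose_matched() above __record_pdu() and calls it from there, so the explicit call in ad_rx_machine() and the redundant NULL checks disappear; the matching rule itself is unchanged: every partner field in the PDU must equal the actor's operational value (including the aggregation bit), or the PDU must describe an individual link. A stand-alone model of that rule, with simplified structures and no byte-order handling:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define AD_STATE_AGGREGATION 0x04	/* bit value assumed for this model */

struct lacpdu_model {
	unsigned short partner_port, partner_port_priority;
	unsigned char  partner_system[6];
	unsigned short partner_system_priority, partner_key;
	unsigned char  partner_state, actor_state;
};

struct port_model {
	unsigned short actor_port_number, actor_port_priority;
	unsigned char  actor_system[6];
	unsigned short actor_system_priority, actor_oper_port_key;
	unsigned char  actor_oper_port_state;
	bool matched;
};

static void choose_matched(const struct lacpdu_model *pdu, struct port_model *p)
{
	bool same =
		pdu->partner_port == p->actor_port_number &&
		pdu->partner_port_priority == p->actor_port_priority &&
		!memcmp(pdu->partner_system, p->actor_system, 6) &&
		pdu->partner_system_priority == p->actor_system_priority &&
		pdu->partner_key == p->actor_oper_port_key &&
		(pdu->partner_state & AD_STATE_AGGREGATION) ==
			(p->actor_oper_port_state & AD_STATE_AGGREGATION);
	bool individual = !(pdu->actor_state & AD_STATE_AGGREGATION);

	p->matched = same || individual;
}

int main(void)
{
	struct port_model port = {
		.actor_port_number = 1, .actor_port_priority = 255,
		.actor_system = {2, 0, 0, 0, 0, 1},
		.actor_system_priority = 65535, .actor_oper_port_key = 9,
		.actor_oper_port_state = AD_STATE_AGGREGATION,
	};
	struct lacpdu_model pdu = {
		.partner_port = 1, .partner_port_priority = 255,
		.partner_system = {2, 0, 0, 0, 0, 1},
		.partner_system_priority = 65535, .partner_key = 9,
		.partner_state = AD_STATE_AGGREGATION,
		.actor_state = AD_STATE_AGGREGATION,
	};

	choose_matched(&pdu, &port);
	printf("matched = %d\n", port.matched);	/* prints 1 */
	return 0;
}
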
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9b5936f072dc..0d30d1e5e53f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -355,9 +355,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
 	struct arp_pkt *arp = (struct arp_pkt *)skb->data;
 	int res = NET_RX_DROP;
 
-	if (dev_net(bond_dev) != &init_net)
-		goto out;
-
 	while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
 		bond_dev = vlan_dev_real_dev(bond_dev);
 
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 83921abae12d..b72e1dc8cf8f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -25,6 +25,7 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/netns/generic.h>
 #include "bonding.h"
 
 /*
@@ -152,11 +153,9 @@ static int bond_inet6addr_event(struct notifier_block *this,
 	struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
 	struct bonding *bond;
 	struct vlan_entry *vlan;
+	struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
 
-	if (dev_net(event_dev) != &init_net)
-		return NOTIFY_DONE;
-
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (bond->dev == event_dev) {
 			switch (event) {
 			case NETDEV_UP:
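
Note: bond_ipv6.c drops the init_net-only check; the inet6addr notifier now resolves the namespace of the affected device and walks that namespace's own bond list obtained through net_generic(). A toy user-space model of the per-namespace private-data lookup (every name here is invented for illustration):

#include <stdio.h>

#define MAX_PERNET_SLOTS 4

struct fake_net {			/* stands in for struct net */
	void *gen[MAX_PERNET_SLOTS];
};

struct fake_bond_net {			/* stands in for struct bond_net */
	const char *label;
	int ndevs;
};

static int fake_bond_net_id;		/* assigned once at "subsystem init" */

static void *net_generic_model(struct fake_net *net, int id)
{
	return net->gen[id];
}

int main(void)
{
	struct fake_net netns[2] = {{{0}}};
	struct fake_bond_net bn0 = { "init_net bonds", 2 };
	struct fake_bond_net bn1 = { "container bonds", 1 };
	int i;

	fake_bond_net_id = 0;		/* pretend the registry handed out slot 0 */
	netns[0].gen[fake_bond_net_id] = &bn0;
	netns[1].gen[fake_bond_net_id] = &bn1;

	for (i = 0; i < 2; i++) {
		struct fake_bond_net *bn =
			net_generic_model(&netns[i], fake_bond_net_id);
		printf("ns %d: %s (%d devices)\n", i, bn->label, bn->ndevs);
	}
	return 0;
}
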
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 40fb5eefc72e..af9b9c4eb496 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -75,6 +75,7 @@
 #include <linux/jiffies.h>
 #include <net/route.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -94,6 +95,7 @@ static int downdelay;
 static int use_carrier	= 1;
 static char *mode;
 static char *primary;
+static char *primary_reselect;
 static char *lacp_rate;
 static char *ad_select;
 static char *xmit_hash_policy;
@@ -126,6 +128,14 @@ MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
 		       "6 for balance-alb");
 module_param(primary, charp, 0);
 MODULE_PARM_DESC(primary, "Primary network device to use");
+module_param(primary_reselect, charp, 0);
+MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
+				   "once it comes up; "
+				   "0 for always (default), "
+				   "1 for only if speed of primary is "
+				   "better, "
+				   "2 for only on active slave "
+				   "failure");
 module_param(lacp_rate, charp, 0);
 MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
 			    "(slow/fast)");
@@ -148,11 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the
 static const char * const version =
 	DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
-LIST_HEAD(bond_dev_list);
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *bond_proc_dir;
-#endif
+int bond_net_id __read_mostly;
 
 static __be32 arp_target[BOND_MAX_ARP_TARGETS];
 static int arp_ip_count;
@@ -200,6 +206,13 @@ const struct bond_parm_tbl fail_over_mac_tbl[] = {
 {	NULL,			-1},
 };
 
+const struct bond_parm_tbl pri_reselect_tbl[] = {
+{	"always",		BOND_PRI_RESELECT_ALWAYS},
+{	"better",		BOND_PRI_RESELECT_BETTER},
+{	"failure",		BOND_PRI_RESELECT_FAILURE},
+{	NULL,			-1},
+};
+
 struct bond_parm_tbl ad_select_tbl[] = {
 {	"stable",	BOND_AD_STABLE},
 {	"bandwidth",	BOND_AD_BANDWIDTH},
@@ -211,7 +224,7 @@ struct bond_parm_tbl ad_select_tbl[] = {
 
 static void bond_send_gratuitous_arp(struct bonding *bond);
 static int bond_init(struct net_device *bond_dev);
-static void bond_deinit(struct net_device *bond_dev);
+static void bond_uninit(struct net_device *bond_dev);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1070,6 +1083,25 @@ out:
 
 }
 
+static bool bond_should_change_active(struct bonding *bond)
+{
+	struct slave *prim = bond->primary_slave;
+	struct slave *curr = bond->curr_active_slave;
+
+	if (!prim || !curr || curr->link != BOND_LINK_UP)
+		return true;
+	if (bond->force_primary) {
+		bond->force_primary = false;
+		return true;
+	}
+	if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
+	    (prim->speed < curr->speed ||
+	     (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
+		return false;
+	if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
+		return false;
+	return true;
+}
 
 /**
  * find_best_interface - select the best available slave to be the active one
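
Note: bond_should_change_active() implements the new primary_reselect policies: "always" keeps the old behaviour, "better" fails back only when the primary is at least as fast as the current slave, "failure" only when the current slave has gone down, and force_primary covers the first enslavement of the named primary. A user-space rendering of the same decision, without the driver's locking or slave structures:

#include <stdbool.h>
#include <stdio.h>

enum { RESELECT_ALWAYS, RESELECT_BETTER, RESELECT_FAILURE };

struct slave_model { bool up; int speed; int duplex; };

struct bond_model {
	struct slave_model *prim, *curr;
	bool force_primary;
	int primary_reselect;
};

static bool should_change_active(struct bond_model *b)
{
	struct slave_model *prim = b->prim, *curr = b->curr;

	if (!prim || !curr || !curr->up)
		return true;			/* nothing usable to keep */
	if (b->force_primary) {			/* first enslave of the primary */
		b->force_primary = false;
		return true;
	}
	if (b->primary_reselect == RESELECT_BETTER &&
	    (prim->speed < curr->speed ||
	     (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
		return false;
	if (b->primary_reselect == RESELECT_FAILURE)
		return false;
	return true;
}

int main(void)
{
	struct slave_model prim = { true, 100, 1 }, curr = { true, 1000, 1 };
	struct bond_model b = { &prim, &curr, false, RESELECT_BETTER };

	/* primary is slower than the current slave: keep the current one */
	printf("better policy, slower primary -> change=%d\n",
	       should_change_active(&b));

	b.primary_reselect = RESELECT_ALWAYS;
	printf("always policy                 -> change=%d\n",
	       should_change_active(&b));
	return 0;
}
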
@@ -1084,7 +1116,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
 	int mintime = bond->params.updelay;
 	int i;
 
-	new_active = old_active = bond->curr_active_slave;
+	new_active = bond->curr_active_slave;
 
 	if (!new_active) { /* there were no active slaves left */
 		if (bond->slave_cnt > 0)   /* found one slave */
@@ -1094,7 +1126,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
 	}
 
 	if ((bond->primary_slave) &&
-	    bond->primary_slave->link == BOND_LINK_UP) {
+	    bond->primary_slave->link == BOND_LINK_UP &&
+	    bond_should_change_active(bond)) {
 		new_active = bond->primary_slave;
 	}
 
@@ -1678,8 +1711,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 	if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
 		/* if there is a primary slave, remember it */
-		if (strcmp(bond->params.primary, new_slave->dev->name) == 0)
+		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
 			bond->primary_slave = new_slave;
+			bond->force_primary = true;
+		}
 	}
 
 	write_lock_bh(&bond->curr_slave_lock);
@@ -1817,8 +1852,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	}
 
 	if (!bond->params.fail_over_mac) {
-		if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr)
-		    && bond->slave_cnt > 1)
+		if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
+		    bond->slave_cnt > 1)
 			pr_warning(DRV_NAME
 			       ": %s: Warning: the permanent HWaddr of %s - "
 			       "%pM - is still in use by %s. "
@@ -1965,25 +2000,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 }
 
 /*
-* Destroy a bonding device.
-* Must be under rtnl_lock when this function is called.
-*/
-static void bond_uninit(struct net_device *bond_dev)
-{
-	struct bonding *bond = netdev_priv(bond_dev);
-
-	bond_deinit(bond_dev);
-	bond_destroy_sysfs_entry(bond);
-
-	if (bond->wq)
-		destroy_workqueue(bond->wq);
-
-	netif_addr_lock_bh(bond_dev);
-	bond_mc_list_destroy(bond);
-	netif_addr_unlock_bh(bond_dev);
-}
-
-/*
 * First release a slave and than destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
@@ -2567,7 +2583,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		fl.fl4_dst = targets[i];
 		fl.fl4_tos = RTO_ONLINK;
 
-		rv = ip_route_output_key(&init_net, &rt, &fl);
+		rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
 		if (rv) {
 			if (net_ratelimit()) {
 				pr_warning(DRV_NAME
@@ -2675,9 +2691,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
 	unsigned char *arp_ptr;
 	__be32 sip, tip;
 
-	if (dev_net(dev) != &init_net)
-		goto out;
-
 	if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
 		goto out;
 
@@ -3201,11 +3214,14 @@ static void bond_info_show_master(struct seq_file *seq)
 	}
 
 	if (USES_PRIMARY(bond->params.mode)) {
-		seq_printf(seq, "Primary Slave: %s\n",
+		seq_printf(seq, "Primary Slave: %s",
 			   (bond->primary_slave) ?
 			   bond->primary_slave->dev->name : "None");
+		if (bond->primary_slave)
+			seq_printf(seq, " (primary_reselect %s)",
+		   pri_reselect_tbl[bond->params.primary_reselect].modename);
 
-		seq_printf(seq, "Currently Active Slave: %s\n",
+		seq_printf(seq, "\nCurrently Active Slave: %s\n",
 			   (curr) ? curr->dev->name : "None");
 	}
 
@@ -3334,13 +3350,14 @@ static const struct file_operations bond_info_fops = {
 	.release = seq_release,
 };
 
-static int bond_create_proc_entry(struct bonding *bond)
+static void bond_create_proc_entry(struct bonding *bond)
 {
 	struct net_device *bond_dev = bond->dev;
+	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
 
-	if (bond_proc_dir) {
+	if (bn->proc_dir) {
 		bond->proc_entry = proc_create_data(bond_dev->name,
-						    S_IRUGO, bond_proc_dir,
+						    S_IRUGO, bn->proc_dir,
 						    &bond_info_fops, bond);
 		if (bond->proc_entry == NULL)
 			pr_warning(DRV_NAME
@@ -3349,14 +3366,15 @@ static int bond_create_proc_entry(struct bonding *bond)
 		else
 			memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
 	}
-
-	return 0;
 }
 
 static void bond_remove_proc_entry(struct bonding *bond)
 {
-	if (bond_proc_dir && bond->proc_entry) {
-		remove_proc_entry(bond->proc_file_name, bond_proc_dir);
+	struct net_device *bond_dev = bond->dev;
+	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+	if (bn->proc_dir && bond->proc_entry) {
+		remove_proc_entry(bond->proc_file_name, bn->proc_dir);
 		memset(bond->proc_file_name, 0, IFNAMSIZ);
 		bond->proc_entry = NULL;
 	}
@@ -3365,11 +3383,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
 /* Create the bonding directory under /proc/net, if doesn't exist yet.
  * Caller must hold rtnl_lock.
  */
-static void bond_create_proc_dir(void)
+static void bond_create_proc_dir(struct bond_net *bn)
 {
-	if (!bond_proc_dir) {
-		bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net);
-		if (!bond_proc_dir)
+	if (!bn->proc_dir) {
+		bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
+		if (!bn->proc_dir)
 			pr_warning(DRV_NAME
 				": Warning: cannot create /proc/net/%s\n",
 				DRV_NAME);
@@ -3379,17 +3397,17 @@ static void bond_create_proc_dir(void)
 /* Destroy the bonding directory under /proc/net, if empty.
  * Caller must hold rtnl_lock.
  */
-static void bond_destroy_proc_dir(void)
+static void bond_destroy_proc_dir(struct bond_net *bn)
 {
-	if (bond_proc_dir) {
-		remove_proc_entry(DRV_NAME, init_net.proc_net);
-		bond_proc_dir = NULL;
+	if (bn->proc_dir) {
+		remove_proc_entry(DRV_NAME, bn->net->proc_net);
+		bn->proc_dir = NULL;
 	}
 }
 
 #else /* !CONFIG_PROC_FS */
 
-static int bond_create_proc_entry(struct bonding *bond)
+static void bond_create_proc_entry(struct bonding *bond)
 {
 }
 
@@ -3397,11 +3415,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
 {
 }
 
-static void bond_create_proc_dir(void)
+static void bond_create_proc_dir(struct bond_net *bn)
 {
 }
 
-static void bond_destroy_proc_dir(void)
+static void bond_destroy_proc_dir(struct bond_net *bn)
 {
 }
 
@@ -3418,9 +3436,6 @@ static int bond_event_changename(struct bonding *bond)
 	bond_remove_proc_entry(bond);
 	bond_create_proc_entry(bond);
 
-	bond_destroy_sysfs_entry(bond);
-	bond_create_sysfs_entry(bond);
-
 	return NOTIFY_DONE;
 }
 
@@ -3432,9 +3447,6 @@ static int bond_master_netdev_event(unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGENAME:
 		return bond_event_changename(event_bond);
-	case NETDEV_UNREGISTER:
-		bond_release_all(event_bond->dev);
-		break;
 	default:
 		break;
 	}
@@ -3526,9 +3538,6 @@ static int bond_netdev_event(struct notifier_block *this,
 {
 	struct net_device *event_dev = (struct net_device *)ptr;
 
-	if (dev_net(event_dev) != &init_net)
-		return NOTIFY_DONE;
-
 	pr_debug("event_dev: %s, event: %lx\n",
 		(event_dev ? event_dev->name : "None"),
 		event);
@@ -3561,13 +3570,11 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
 {
 	struct in_ifaddr *ifa = ptr;
 	struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
+	struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
 	struct bonding *bond;
 	struct vlan_entry *vlan;
 
-	if (dev_net(ifa->ifa_dev->dev) != &init_net)
-		return NOTIFY_DONE;
-
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (bond->dev == event_dev) {
 			switch (event) {
 			case NETDEV_UP:
@@ -3657,8 +3664,7 @@ void bond_unregister_arp(struct bonding *bond)
  * Hash for the output device based upon layer 2 and layer 3 data. If
  * the packet is not IP mimic bond_xmit_hash_policy_l2()
  */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
-				     struct net_device *bond_dev, int count)
+static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 	struct iphdr *iph = ip_hdr(skb);
@@ -3676,8 +3682,7 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
  * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
  * altogether not IP, mimic bond_xmit_hash_policy_l2()
  */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
-				    struct net_device *bond_dev, int count)
+static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 	struct iphdr *iph = ip_hdr(skb);
@@ -3701,8 +3706,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
 /*
  * Hash for the output device based upon layer 2 data
  */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
-				   struct net_device *bond_dev, int count)
+static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 
@@ -3939,7 +3943,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave);
+	slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
 
 	pr_debug("slave_dev=%p: \n", slave_dev);
 
@@ -4295,7 +4299,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 	if (!BOND_IS_OK(bond))
 		goto out;
 
-	slave_no = bond->xmit_hash_policy(skb, bond_dev, bond->slave_cnt);
+	slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
 
 	bond_for_each_slave(bond, slave, i) {
 		slave_no--;
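
Note: the xmit_hash_policy callbacks lose their unused net_device argument, so bond_3ad_xmit_xor() and bond_xmit_xor() now pass only the skb and the slave count. For reference, a sketch of a layer-3+4 style hash in the spirit of bond_xmit_hash_policy_l34(): header parsing is omitted and the field values are passed in directly, so treat it as illustrative rather than the driver's exact code.

#include <stdint.h>
#include <stdio.h>

static int xmit_hash_l34(uint32_t saddr, uint32_t daddr,
			 uint16_t sport, uint16_t dport, int count)
{
	uint32_t layer4_xor = sport ^ dport;

	/* fold the port pair with the low bits of the address pair,
	 * then pick a slave index modulo the slave count */
	return (layer4_xor ^ ((saddr ^ daddr) & 0xffff)) % count;
}

int main(void)
{
	/* two flows between the same hosts land on different slaves */
	printf("flow 1 -> slave %d\n",
	       xmit_hash_l34(0x0a000001, 0x0a000002, 40000, 80, 2));
	printf("flow 2 -> slave %d\n",
	       xmit_hash_l34(0x0a000001, 0x0a000002, 40001, 80, 2));
	return 0;
}
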
@@ -4576,37 +4580,29 @@ static void bond_work_cancel_all(struct bonding *bond)
 		cancel_delayed_work(&bond->ad_work);
 }
 
-/* De-initialize device specific data.
- * Caller must hold rtnl_lock.
- */
-static void bond_deinit(struct net_device *bond_dev)
+/*
+* Destroy a bonding device.
+* Must be under rtnl_lock when this function is called.
+*/
+static void bond_uninit(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 
+	/* Release the bonded slaves */
+	bond_release_all(bond_dev);
+
 	list_del(&bond->bond_list);
 
 	bond_work_cancel_all(bond);
 
 	bond_remove_proc_entry(bond);
-}
-
-/* Unregister and free all bond devices.
- * Caller must hold rtnl_lock.
- */
-static void bond_free_all(void)
-{
-	struct bonding *bond, *nxt;
 
-	list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
-		struct net_device *bond_dev = bond->dev;
-
-		bond_work_cancel_all(bond);
-		/* Release the bonded slaves */
-		bond_release_all(bond_dev);
-		unregister_netdevice(bond_dev);
-	}
+	if (bond->wq)
+		destroy_workqueue(bond->wq);
 
-	bond_destroy_proc_dir();
+	netif_addr_lock_bh(bond_dev);
+	bond_mc_list_destroy(bond);
+	netif_addr_unlock_bh(bond_dev);
 }
 
 /*------------------------- Module initialization ---------------------------*/
@@ -4646,7 +4642,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
 
 static int bond_check_params(struct bond_params *params)
 {
-	int arp_validate_value, fail_over_mac_value;
+	int arp_validate_value, fail_over_mac_value, primary_reselect_value;
 
 	/*
 	 * Convert string parameters.
@@ -4665,7 +4661,8 @@ static int bond_check_params(struct bond_params *params)
 		if ((bond_mode != BOND_MODE_XOR) &&
 		    (bond_mode != BOND_MODE_8023AD)) {
 			pr_info(DRV_NAME
-			       ": xor_mode param is irrelevant in mode %s\n",
+				": xmit_hash_policy param is irrelevant in"
+				" mode %s\n",
 			       bond_mode_name(bond_mode));
 		} else {
 			xmit_hashtype = bond_parse_parm(xmit_hash_policy,
@@ -4945,6 +4942,20 @@ static int bond_check_params(struct bond_params *params)
 		primary = NULL;
 	}
 
+	if (primary && primary_reselect) {
+		primary_reselect_value = bond_parse_parm(primary_reselect,
+							 pri_reselect_tbl);
+		if (primary_reselect_value == -1) {
+			pr_err(DRV_NAME
+			       ": Error: Invalid primary_reselect \"%s\"\n",
+			       primary_reselect ==
+					NULL ? "NULL" : primary_reselect);
+			return -EINVAL;
+		}
+	} else {
+		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
+	}
+
 	if (fail_over_mac) {
 		fail_over_mac_value = bond_parse_parm(fail_over_mac,
 						      fail_over_mac_tbl);
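
Note: primary_reselect is validated through the same bond_parm_tbl pattern as the other string options; an unrecognised value fails module load with -EINVAL and the default is "always". An approximate user-space re-creation of the table lookup (the in-kernel helper is more forgiving about input formatting than this sketch):

#include <stdio.h>
#include <string.h>

struct parm_tbl { const char *modename; int mode; };

static const struct parm_tbl pri_reselect_tbl[] = {
	{ "always",  0 },	/* BOND_PRI_RESELECT_ALWAYS */
	{ "better",  1 },	/* BOND_PRI_RESELECT_BETTER */
	{ "failure", 2 },	/* BOND_PRI_RESELECT_FAILURE */
	{ NULL,     -1 },
};

static int parse_parm(const char *buf, const struct parm_tbl *tbl)
{
	int i;

	for (i = 0; tbl[i].modename; i++)
		if (strcmp(buf, tbl[i].modename) == 0)
			return tbl[i].mode;
	return -1;		/* unknown value: the caller turns this into -EINVAL */
}

int main(void)
{
	printf("better -> %d\n", parse_parm("better", pri_reselect_tbl));
	printf("bogus  -> %d\n", parse_parm("bogus", pri_reselect_tbl));
	return 0;
}
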
@@ -4976,6 +4987,7 @@ static int bond_check_params(struct bond_params *params)
 	params->use_carrier = use_carrier;
 	params->lacp_fast = lacp_fast;
 	params->primary[0] = 0;
+	params->primary_reselect = primary_reselect_value;
 	params->fail_over_mac = fail_over_mac_value;
 
 	if (primary) {
@@ -5012,6 +5024,7 @@ static void bond_set_lockdep_class(struct net_device *dev)
 static int bond_init(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
 
 	pr_debug("Begin bond_init for %s\n", bond_dev->name);
 
@@ -5024,30 +5037,41 @@ static int bond_init(struct net_device *bond_dev)
 	netif_carrier_off(bond_dev);
 
 	bond_create_proc_entry(bond);
-	list_add_tail(&bond->bond_list, &bond_dev_list);
+	list_add_tail(&bond->bond_list, &bn->dev_list);
 
+	bond_prepare_sysfs_group(bond);
+	return 0;
+}
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
 	return 0;
 }
 
+static struct rtnl_link_ops bond_link_ops __read_mostly = {
+	.kind		= "bond",
+	.priv_size	= sizeof(struct bonding),
+	.setup		= bond_setup,
+	.validate	= bond_validate,
+};
+
 /* Create a new bond based on the specified name and bonding parameters.
  * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
  * set up our sysfs entries.
  */
-int bond_create(const char *name)
+int bond_create(struct net *net, const char *name)
 {
 	struct net_device *bond_dev;
 	int res;
 
 	rtnl_lock();
-	/* Check to see if the bond already exists. */
-	/* FIXME: pass netns from caller */
-	if (name && __dev_get_by_name(&init_net, name)) {
-		pr_err(DRV_NAME ": cannot add bond %s; already exists\n",
-		       name);
-		res = -EEXIST;
-		goto out_rtnl;
-	}
 
 	bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
 				bond_setup);
@@ -5055,9 +5079,12 @@ int bond_create(const char *name)
 		pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
 		       name);
 		res = -ENOMEM;
-		goto out_rtnl;
+		goto out;
 	}
 
+	dev_net_set(bond_dev, net);
+	bond_dev->rtnl_link_ops = &bond_link_ops;
+
 	if (!name) {
 		res = dev_alloc_name(bond_dev, "bond%d");
 		if (res < 0)
@@ -5065,27 +5092,41 @@ int bond_create(const char *name)
 	}
 
 	res = register_netdevice(bond_dev);
-	if (res < 0)
-		goto out_bond;
-
-	res = bond_create_sysfs_entry(netdev_priv(bond_dev));
-	if (res < 0)
-		goto out_unreg;
 
+out:
 	rtnl_unlock();
-	return 0;
-
-out_unreg:
-	unregister_netdevice(bond_dev);
-out_bond:
-	bond_deinit(bond_dev);
+	return res;
 out_netdev:
 	free_netdev(bond_dev);
-out_rtnl:
-	rtnl_unlock();
-	return res;
+	goto out;
+}
+
+static int bond_net_init(struct net *net)
+{
+	struct bond_net *bn = net_generic(net, bond_net_id);
+
+	bn->net = net;
+	INIT_LIST_HEAD(&bn->dev_list);
+
+	bond_create_proc_dir(bn);
+	
+	return 0;
 }
 
+static void bond_net_exit(struct net *net)
+{
+	struct bond_net *bn = net_generic(net, bond_net_id);
+
+	bond_destroy_proc_dir(bn);
+}
+
+static struct pernet_operations bond_net_ops = {
+	.init = bond_net_init,
+	.exit = bond_net_exit,
+	.id   = &bond_net_id,
+	.size = sizeof(struct bond_net),
+};
+
 static int __init bonding_init(void)
 {
 	int i;
@@ -5097,10 +5138,16 @@ static int __init bonding_init(void)
 	if (res)
 		goto out;
 
-	bond_create_proc_dir();
+	res = register_pernet_subsys(&bond_net_ops);
+	if (res)
+		goto out;
+
+	res = rtnl_link_register(&bond_link_ops);
+	if (res)
+		goto err_link;
 
 	for (i = 0; i < max_bonds; i++) {
-		res = bond_create(NULL);
+		res = bond_create(&init_net, NULL);
 		if (res)
 			goto err;
 	}
@@ -5112,14 +5159,13 @@ static int __init bonding_init(void)
 	register_netdevice_notifier(&bond_netdev_notifier);
 	register_inetaddr_notifier(&bond_inetaddr_notifier);
 	bond_register_ipv6_notifier();
-
-	goto out;
-err:
-	rtnl_lock();
-	bond_free_all();
-	rtnl_unlock();
 out:
 	return res;
+err:
+	rtnl_link_unregister(&bond_link_ops);
+err_link:
+	unregister_pernet_subsys(&bond_net_ops);
+	goto out;
 
 }
 
@@ -5131,9 +5177,8 @@ static void __exit bonding_exit(void)
 
 	bond_destroy_sysfs();
 
-	rtnl_lock();
-	bond_free_all();
-	rtnl_unlock();
+	rtnl_link_unregister(&bond_link_ops);
+	unregister_pernet_subsys(&bond_net_ops);
 }
 
 module_init(bonding_init);
@@ -5142,3 +5187,4 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_ALIAS_RTNL_LINK("bond");
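
Note: bonding becomes namespace-aware and rtnl_link-capable here: bond_net_ops creates the per-namespace /proc directory and device list, bond_link_ops exposes the "bond" link kind with address validation, and module init now unwinds its registrations in reverse order instead of calling the removed bond_free_all(). The toy program below mirrors only that unwinding structure of bonding_init(); the "registrations" are just printouts.

#include <stdio.h>

static int  register_pernet(void)   { printf("pernet registered\n");    return 0; }
static void unregister_pernet(void) { printf("pernet unregistered\n"); }
static int  register_link(void)     { printf("link ops registered\n");  return 0; }
static void unregister_link(void)   { printf("link ops unregistered\n"); }

static int model_init(int fail_at_create)
{
	int res;

	res = register_pernet();
	if (res)
		goto out;
	res = register_link();
	if (res)
		goto err_link;
	if (fail_at_create) {		/* stands in for bond_create() failing */
		res = -1;
		goto err;
	}
out:
	return res;
err:
	unregister_link();
err_link:
	unregister_pernet();
	goto out;
}

int main(void)
{
	printf("clean init   -> %d\n\n", model_init(0));
	printf("failing init -> %d\n", model_init(1));
	return 0;
}
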
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8762a27a2a18..4e00b4f83641 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -36,6 +36,8 @@
 #include <linux/rtnetlink.h>
 #include <linux/etherdevice.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <linux/nsproxy.h>
 
 #include "bonding.h"
 
@@ -48,12 +50,14 @@
  */
 static ssize_t bonding_show_bonds(struct class *cls, char *buf)
 {
+	struct net *net = current->nsproxy->net_ns;
+	struct bond_net *bn = net_generic(net, bond_net_id);
 	int res = 0;
 	struct bonding *bond;
 
 	rtnl_lock();
 
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (res > (PAGE_SIZE - IFNAMSIZ)) {
 			/* not enough space for another interface name */
 			if ((PAGE_SIZE - res) > 10)
@@ -70,11 +74,12 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
 	return res;
 }
 
-static struct net_device *bond_get_by_name(const char *ifname)
+static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
 {
+	struct bond_net *bn = net_generic(net, bond_net_id);
 	struct bonding *bond;
 
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
 			return bond->dev;
 	}
@@ -92,6 +97,7 @@ static struct net_device *bond_get_by_name(const char *ifname)
 static ssize_t bonding_store_bonds(struct class *cls,
 				   const char *buffer, size_t count)
 {
+	struct net *net = current->nsproxy->net_ns;
 	char command[IFNAMSIZ + 1] = {0, };
 	char *ifname;
 	int rv, res = count;
@@ -105,7 +111,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
 	if (command[0] == '+') {
 		pr_info(DRV_NAME
 			": %s is being created...\n", ifname);
-		rv = bond_create(ifname);
+		rv = bond_create(net, ifname);
 		if (rv) {
 			pr_info(DRV_NAME ": Bond creation failed.\n");
 			res = rv;
@@ -114,7 +120,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
 		struct net_device *bond_dev;
 
 		rtnl_lock();
-		bond_dev = bond_get_by_name(ifname);
+		bond_dev = bond_get_by_name(net, ifname);
 		if (bond_dev) {
 			pr_info(DRV_NAME ": %s is being deleted...\n",
 				ifname);
@@ -239,8 +245,7 @@ static ssize_t bonding_store_slaves(struct device *d,
 		/* Got a slave name in ifname.  Is it already in the list? */
 		found = 0;
 
-		/* FIXME: get netns from sysfs object */
-		dev = __dev_get_by_name(&init_net, ifname);
+		dev = __dev_get_by_name(dev_net(bond->dev), ifname);
 		if (!dev) {
 			pr_info(DRV_NAME
 			       ": %s: Interface %s does not exist!\n",
@@ -1214,6 +1219,58 @@ static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
 		   bonding_show_primary, bonding_store_primary);
 
 /*
+ * Show and set the primary_reselect flag.
+ */
+static ssize_t bonding_show_primary_reselect(struct device *d,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	return sprintf(buf, "%s %d\n",
+		       pri_reselect_tbl[bond->params.primary_reselect].modename,
+		       bond->params.primary_reselect);
+}
+
+static ssize_t bonding_store_primary_reselect(struct device *d,
+					      struct device_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int new_value, ret = count;
+	struct bonding *bond = to_bond(d);
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	new_value = bond_parse_parm(buf, pri_reselect_tbl);
+	if (new_value < 0)  {
+		pr_err(DRV_NAME
+		       ": %s: Ignoring invalid primary_reselect value %.*s.\n",
+		       bond->dev->name,
+		       (int) strlen(buf) - 1, buf);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	bond->params.primary_reselect = new_value;
+	pr_info(DRV_NAME ": %s: setting primary_reselect to %s (%d).\n",
+		bond->dev->name, pri_reselect_tbl[new_value].modename,
+		new_value);
+
+	read_lock(&bond->lock);
+	write_lock_bh(&bond->curr_slave_lock);
+	bond_select_active_slave(bond);
+	write_unlock_bh(&bond->curr_slave_lock);
+	read_unlock(&bond->lock);
+out:
+	rtnl_unlock();
+	return ret;
+}
+static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
+		   bonding_show_primary_reselect,
+		   bonding_store_primary_reselect);
+
+/*
  * Show and set the use_carrier flag.
  */
 static ssize_t bonding_show_carrier(struct device *d,
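
Note: the matching sysfs attribute parses the same pri_reselect_tbl and immediately re-runs slave selection under the bond locks. A minimal user-space example of flipping the knob, assuming a bond0 device already exists (path and error handling kept deliberately simple):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/net/bond0/bonding/primary_reselect";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("better\n", f);	/* selects BOND_PRI_RESELECT_BETTER */
	fclose(f);
	return 0;
}
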
@@ -1502,6 +1559,7 @@ static struct attribute *per_bond_attrs[] = {
 	&dev_attr_num_unsol_na.attr,
 	&dev_attr_miimon.attr,
 	&dev_attr_primary.attr,
+	&dev_attr_primary_reselect.attr,
 	&dev_attr_use_carrier.attr,
 	&dev_attr_active_slave.attr,
 	&dev_attr_mii_status.attr,
@@ -1564,24 +1622,8 @@ void bond_destroy_sysfs(void)
  * Initialize sysfs for each bond.  This sets up and registers
  * the 'bondctl' directory for each individual bond under /sys/class/net.
  */
-int bond_create_sysfs_entry(struct bonding *bond)
+void bond_prepare_sysfs_group(struct bonding *bond)
 {
-	struct net_device *dev = bond->dev;
-	int err;
-
-	err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
-	if (err)
-		pr_emerg("eek! didn't create group!\n");
-
-	return err;
-}
-/*
- * Remove sysfs entries for each bond.
- */
-void bond_destroy_sysfs_entry(struct bonding *bond)
-{
-	struct net_device *dev = bond->dev;
-
-	sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
+	bond->dev->sysfs_groups[0] = &bonding_group;
 }
 
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 68247714466f..558ec1352527 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,15 +23,13 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION	"3.5.0"
-#define DRV_RELDATE	"November 4, 2008"
+#define DRV_VERSION	"3.6.0"
+#define DRV_RELDATE	"September 26, 2009"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
 #define BOND_MAX_ARP_TARGETS	16
 
-extern struct list_head bond_dev_list;
-
 #define IS_UP(dev)					   \
 	      ((((dev)->flags & IFF_UP) == IFF_UP)	&& \
 	       netif_running(dev)			&& \
@@ -131,6 +129,7 @@ struct bond_params {
 	int lacp_fast;
 	int ad_select;
 	char primary[IFNAMSIZ];
+	int primary_reselect;
 	__be32 arp_targets[BOND_MAX_ARP_TARGETS];
 };
 
@@ -190,6 +189,7 @@ struct bonding {
 	struct   slave *curr_active_slave;
 	struct   slave *current_arp_slave;
 	struct   slave *primary_slave;
+	bool     force_primary;
 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
 	rwlock_t lock;
 	rwlock_t curr_slave_lock;
@@ -204,7 +204,7 @@ struct bonding {
 #endif /* CONFIG_PROC_FS */
 	struct   list_head bond_list;
 	struct   dev_mc_list *mc_list;
-	int      (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int);
+	int      (*xmit_hash_policy)(struct sk_buff *, int);
 	__be32   master_ip;
 	u16      flags;
 	u16      rr_tx_counter;
@@ -254,10 +254,14 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 
 static inline bool bond_is_lb(const struct bonding *bond)
 {
-	return bond->params.mode == BOND_MODE_TLB
-		|| bond->params.mode == BOND_MODE_ALB;
+	return (bond->params.mode == BOND_MODE_TLB ||
+		bond->params.mode == BOND_MODE_ALB);
 }
 
+#define BOND_PRI_RESELECT_ALWAYS	0
+#define BOND_PRI_RESELECT_BETTER	1
+#define BOND_PRI_RESELECT_FAILURE	2
+
 #define BOND_FOM_NONE			0
 #define BOND_FOM_ACTIVE			1
 #define BOND_FOM_FOLLOW			2
@@ -321,12 +325,11 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
 
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-int bond_create(const char *name);
+int bond_create(struct net *net, const char *name);
 int  bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_create_sysfs(void);
 void bond_destroy_sysfs(void);
-void bond_destroy_sysfs_entry(struct bonding *bond);
-int bond_create_sysfs_entry(struct bonding *bond);
+void bond_prepare_sysfs_group(struct bonding *bond);
 int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
 void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
@@ -341,13 +344,22 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_register_arp(struct bonding *);
 void bond_unregister_arp(struct bonding *);
 
+struct bond_net {
+	struct net *		net;	/* Associated network namespace */
+	struct list_head	dev_list;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *	proc_dir;
+#endif
+};
+
 /* exported from bond_main.c */
-extern struct list_head bond_dev_list;
+extern int bond_net_id;
 extern const struct bond_parm_tbl bond_lacp_tbl[];
 extern const struct bond_parm_tbl bond_mode_tbl[];
 extern const struct bond_parm_tbl xmit_hashtype_tbl[];
 extern const struct bond_parm_tbl arp_validate_tbl[];
 extern const struct bond_parm_tbl fail_over_mac_tbl[];
+extern const struct bond_parm_tbl pri_reselect_tbl[];
 extern struct bond_parm_tbl ad_select_tbl[];
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -370,4 +382,3 @@ static inline void bond_unregister_ipv6_notifier(void)
 #endif
 
 #endif /* _LINUX_BONDING_H */
-
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 772f6d2489ce..bb803fa1e6a7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,6 +41,21 @@ config CAN_AT91
 	---help---
 	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
 
+config CAN_TI_HECC
+	depends on CAN_DEV && ARCH_OMAP3
+	tristate "TI High End CAN Controller"
+	---help---
+	  Driver for TI HECC (High End CAN Controller) module found on many
+	  TI devices. The device specifications are available from www.ti.com
+
+config CAN_MCP251X
+	tristate "Microchip MCP251x SPI CAN controllers"
+	depends on CAN_DEV && SPI
+	---help---
+	  Driver for the Microchip MCP251x SPI CAN controllers.
+
+source "drivers/net/can/mscan/Kconfig"
+
 source "drivers/net/can/sja1000/Kconfig"
 
 source "drivers/net/can/usb/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0dea62721f2f..56899fef1c6a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,9 @@ can-dev-y			:= dev.o
 obj-y				+= usb/
 
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
+obj-$(CONFIG_CAN_MSCAN)		+= mscan/
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
+obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
+obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f67ae285a35a..cbe3fce53e3b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
 	set_mb_mode_prio(priv, mb, mode, 0);
 }
 
-static struct sk_buff *alloc_can_skb(struct net_device *dev,
-		struct can_frame **cf)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
-	if (unlikely(!skb))
-		return NULL;
-
-	skb->protocol = htons(ETH_P_CAN);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-
-	return skb;
-}
-
-static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
-		struct can_frame **cf)
-{
-	struct sk_buff *skb;
-
-	skb = alloc_can_skb(dev, cf);
-	if (unlikely(!skb))
-		return NULL;
-
-	memset(*cf, 0, sizeof(struct can_frame));
-	(*cf)->can_id = CAN_ERR_FLAG;
-	(*cf)->can_dlc = CAN_ERR_DLC;
-
-	return skb;
-}
-
 /*
  * Switch transceiver on or off
  */
@@ -1087,7 +1055,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
 		goto exit_release;
 	}
 
-	dev = alloc_candev(sizeof(struct at91_priv));
+	dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
 	if (!dev) {
 		err = -ENOMEM;
 		goto exit_iounmap;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2868fe842a41..c1bb29f0322b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -245,7 +245,7 @@ static void can_flush_echo_skb(struct net_device *dev)
 	struct net_device_stats *stats = &dev->stats;
 	int i;
 
-	for (i = 0; i < CAN_ECHO_SKB_MAX; i++) {
+	for (i = 0; i < priv->echo_skb_max; i++) {
 		if (priv->echo_skb[i]) {
 			kfree_skb(priv->echo_skb[i]);
 			priv->echo_skb[i] = NULL;
@@ -262,10 +262,13 @@ static void can_flush_echo_skb(struct net_device *dev)
  * of the device driver. The driver must protect access to
  * priv->echo_skb, if necessary.
  */
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx)
+void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+		      unsigned int idx)
 {
 	struct can_priv *priv = netdev_priv(dev);
 
+	BUG_ON(idx >= priv->echo_skb_max);
+
 	/* check flag whether this packet has to be looped back */
 	if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
 		kfree_skb(skb);
@@ -311,10 +314,12 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
  * is handled in the device driver. The driver must protect
  * access to priv->echo_skb, if necessary.
  */
-void can_get_echo_skb(struct net_device *dev, int idx)
+void can_get_echo_skb(struct net_device *dev, unsigned int idx)
 {
 	struct can_priv *priv = netdev_priv(dev);
 
+	BUG_ON(idx >= priv->echo_skb_max);
+
 	if (priv->echo_skb[idx]) {
 		netif_rx(priv->echo_skb[idx]);
 		priv->echo_skb[idx] = NULL;
@@ -327,10 +332,12 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb);
   *
   * The function is typically called when TX failed.
   */
-void can_free_echo_skb(struct net_device *dev, int idx)
+void can_free_echo_skb(struct net_device *dev, unsigned int idx)
 {
 	struct can_priv *priv = netdev_priv(dev);
 
+	BUG_ON(idx >= priv->echo_skb_max);
+
 	if (priv->echo_skb[idx]) {
 		kfree_skb(priv->echo_skb[idx]);
 		priv->echo_skb[idx] = NULL;
@@ -359,17 +366,12 @@ void can_restart(unsigned long data)
 	can_flush_echo_skb(dev);
 
 	/* send restart message upstream */
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev, &cf);
 	if (skb == NULL) {
 		err = -ENOMEM;
 		goto restart;
 	}
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-	cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
-	cf->can_dlc = CAN_ERR_DLC;
+	cf->can_id |= CAN_ERR_RESTARTED;
 
 	netif_rx(skb);
 
@@ -442,20 +444,66 @@ static void can_setup(struct net_device *dev)
 	dev->features = NETIF_F_NO_CSUM;
 }
 
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+	if (unlikely(!skb))
+		return NULL;
+
+	skb->protocol = htons(ETH_P_CAN);
+	skb->pkt_type = PACKET_BROADCAST;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+	memset(*cf, 0, sizeof(struct can_frame));
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, cf);
+	if (unlikely(!skb))
+		return NULL;
+
+	(*cf)->can_id = CAN_ERR_FLAG;
+	(*cf)->can_dlc = CAN_ERR_DLC;
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
 /*
  * Allocate and setup space for the CAN network device
  */
-struct net_device *alloc_candev(int sizeof_priv)
+struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 {
 	struct net_device *dev;
 	struct can_priv *priv;
+	int size;
 
-	dev = alloc_netdev(sizeof_priv, "can%d", can_setup);
+	if (echo_skb_max)
+		size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
+			echo_skb_max * sizeof(struct sk_buff *);
+	else
+		size = sizeof_priv;
+
+	dev = alloc_netdev(size, "can%d", can_setup);
 	if (!dev)
 		return NULL;
 
 	priv = netdev_priv(dev);
 
+	if (echo_skb_max) {
+		priv->echo_skb_max = echo_skb_max;
+		priv->echo_skb = (void *)priv +
+			ALIGN(sizeof_priv, sizeof(struct sk_buff *));
+	}
+
 	priv->state = CAN_STATE_STOPPED;
 
 	init_timer(&priv->restart_timer);
@@ -647,7 +695,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static int can_newlink(struct net_device *dev,
+static int can_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	return -EOPNOTSUPP;
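
Note: alloc_candev() now sizes the echo_skb array per driver (echo_skb_max) and places it directly after the pointer-aligned private area, and the echo helpers BUG_ON() an out-of-range index; at91_can and the new drivers pass their TX mailbox count instead of relying on a fixed CAN_ECHO_SKB_MAX. The layout arithmetic is easy to check in isolation; the dummy priv struct below is this model's own, not the driver's.

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct dummy_priv { int state; char pad[13]; };	/* arbitrary size for the demo */

int main(void)
{
	size_t sizeof_priv = sizeof(struct dummy_priv);
	unsigned int echo_skb_max = 4;
	size_t aligned = ALIGN_UP(sizeof_priv, sizeof(void *));
	size_t total = aligned + echo_skb_max * sizeof(void *);
	void *mem = calloc(1, total);
	void **echo_skb = (void **)((char *)mem + aligned);

	printf("priv %zu bytes, aligned to %zu, total allocation %zu\n",
	       sizeof_priv, aligned, total);
	printf("echo_skb[] starts %td bytes into the block\n",
	       (char *)echo_skb - (char *)mem);
	free(mem);
	return 0;
}
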
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
new file mode 100644
index 000000000000..78b1b69b2921
--- /dev/null
+++ b/drivers/net/can/mcp251x.c
@@ -0,0 +1,1166 @@
+/*
+ * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
+ *
+ * MCP2510 support and bug fixes by Christian Pellegrin
+ * <chripell@evolware.org>
+ *
+ * Copyright 2009 Christian Pellegrin EVOL S.r.l.
+ *
+ * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
+ * Written under contract by:
+ *   Chris Elston, Katalix Systems, Ltd.
+ *
+ * Based on Microchip MCP251x CAN controller driver written by
+ * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
+ *
+ * Based on CAN bus driver for the CCAN controller written by
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
+ * - Simon Kallweit, intefo AG
+ * Copyright 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct mcp251x_platform_data mcp251x_info = {
+ *         .oscillator_frequency = 8000000,
+ *         .board_specific_setup = &mcp251x_setup,
+ *         .model = CAN_MCP251X_MCP2510,
+ *         .power_enable = mcp251x_power_enable,
+ *         .transceiver_enable = NULL,
+ * };
+ *
+ * static struct spi_board_info spi_board_info[] = {
+ *         {
+ *                 .modalias = "mcp251x",
+ *                 .platform_data = &mcp251x_info,
+ *                 .irq = IRQ_EINT13,
+ *                 .max_speed_hz = 2*1000*1000,
+ *                 .chip_select = 2,
+ *         },
+ * };
+ *
+ * Please see mcp251x.h for a description of the fields in
+ * struct mcp251x_platform_data.
+ *
+ */
+
+#include <linux/can.h>
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/mcp251x.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/uaccess.h>
+
+/* SPI interface instruction set */
+#define INSTRUCTION_WRITE	0x02
+#define INSTRUCTION_READ	0x03
+#define INSTRUCTION_BIT_MODIFY	0x05
+#define INSTRUCTION_LOAD_TXB(n)	(0x40 + 2 * (n))
+#define INSTRUCTION_READ_RXB(n)	(((n) == 0) ? 0x90 : 0x94)
+#define INSTRUCTION_RESET	0xC0
+
+/* MPC251x registers */
+#define CANSTAT	      0x0e
+#define CANCTRL	      0x0f
+#  define CANCTRL_REQOP_MASK	    0xe0
+#  define CANCTRL_REQOP_CONF	    0x80
+#  define CANCTRL_REQOP_LISTEN_ONLY 0x60
+#  define CANCTRL_REQOP_LOOPBACK    0x40
+#  define CANCTRL_REQOP_SLEEP	    0x20
+#  define CANCTRL_REQOP_NORMAL	    0x00
+#  define CANCTRL_OSM		    0x08
+#  define CANCTRL_ABAT		    0x10
+#define TEC	      0x1c
+#define REC	      0x1d
+#define CNF1	      0x2a
+#  define CNF1_SJW_SHIFT   6
+#define CNF2	      0x29
+#  define CNF2_BTLMODE	   0x80
+#  define CNF2_SAM         0x40
+#  define CNF2_PS1_SHIFT   3
+#define CNF3	      0x28
+#  define CNF3_SOF	   0x08
+#  define CNF3_WAKFIL	   0x04
+#  define CNF3_PHSEG2_MASK 0x07
+#define CANINTE	      0x2b
+#  define CANINTE_MERRE 0x80
+#  define CANINTE_WAKIE 0x40
+#  define CANINTE_ERRIE 0x20
+#  define CANINTE_TX2IE 0x10
+#  define CANINTE_TX1IE 0x08
+#  define CANINTE_TX0IE 0x04
+#  define CANINTE_RX1IE 0x02
+#  define CANINTE_RX0IE 0x01
+#define CANINTF	      0x2c
+#  define CANINTF_MERRF 0x80
+#  define CANINTF_WAKIF 0x40
+#  define CANINTF_ERRIF 0x20
+#  define CANINTF_TX2IF 0x10
+#  define CANINTF_TX1IF 0x08
+#  define CANINTF_TX0IF 0x04
+#  define CANINTF_RX1IF 0x02
+#  define CANINTF_RX0IF 0x01
+#define EFLG	      0x2d
+#  define EFLG_EWARN	0x01
+#  define EFLG_RXWAR	0x02
+#  define EFLG_TXWAR	0x04
+#  define EFLG_RXEP	0x08
+#  define EFLG_TXEP	0x10
+#  define EFLG_TXBO	0x20
+#  define EFLG_RX0OVR	0x40
+#  define EFLG_RX1OVR	0x80
+#define TXBCTRL(n)  (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
+#  define TXBCTRL_ABTF	0x40
+#  define TXBCTRL_MLOA	0x20
+#  define TXBCTRL_TXERR 0x10
+#  define TXBCTRL_TXREQ 0x08
+#define TXBSIDH(n)  (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
+#  define SIDH_SHIFT    3
+#define TXBSIDL(n)  (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
+#  define SIDL_SID_MASK    7
+#  define SIDL_SID_SHIFT   5
+#  define SIDL_EXIDE_SHIFT 3
+#  define SIDL_EID_SHIFT   16
+#  define SIDL_EID_MASK    3
+#define TXBEID8(n)  (((n) * 0x10) + 0x30 + TXBEID8_OFF)
+#define TXBEID0(n)  (((n) * 0x10) + 0x30 + TXBEID0_OFF)
+#define TXBDLC(n)   (((n) * 0x10) + 0x30 + TXBDLC_OFF)
+#  define DLC_RTR_SHIFT    6
+#define TXBCTRL_OFF 0
+#define TXBSIDH_OFF 1
+#define TXBSIDL_OFF 2
+#define TXBEID8_OFF 3
+#define TXBEID0_OFF 4
+#define TXBDLC_OFF  5
+#define TXBDAT_OFF  6
+#define RXBCTRL(n)  (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
+#  define RXBCTRL_BUKT	0x04
+#  define RXBCTRL_RXM0	0x20
+#  define RXBCTRL_RXM1	0x40
+#define RXBSIDH(n)  (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
+#  define RXBSIDH_SHIFT 3
+#define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
+#  define RXBSIDL_IDE   0x08
+#  define RXBSIDL_EID   3
+#  define RXBSIDL_SHIFT 5
+#define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
+#define RXBEID0(n)  (((n) * 0x10) + 0x60 + RXBEID0_OFF)
+#define RXBDLC(n)   (((n) * 0x10) + 0x60 + RXBDLC_OFF)
+#  define RXBDLC_LEN_MASK  0x0f
+#  define RXBDLC_RTR       0x40
+#define RXBCTRL_OFF 0
+#define RXBSIDH_OFF 1
+#define RXBSIDL_OFF 2
+#define RXBEID8_OFF 3
+#define RXBEID0_OFF 4
+#define RXBDLC_OFF  5
+#define RXBDAT_OFF  6
+
+#define GET_BYTE(val, byte)			\
+	(((val) >> ((byte) * 8)) & 0xff)
+#define SET_BYTE(val, byte)			\
+	(((val) & 0xff) << ((byte) * 8))
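+
+/*
+ * Worked examples, for illustration only (derived directly from the
+ * two helper macros above):
+ *   GET_BYTE(0x12345678, 1) == 0x56
+ *   SET_BYTE(0xab, 2)       == 0x00ab0000
+ */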
+
+/*
+ * Buffer size required for the largest SPI transfer (i.e., reading a
+ * frame)
+ */
+#define CAN_FRAME_MAX_DATA_LEN	8
+#define SPI_TRANSFER_BUF_LEN	(6 + CAN_FRAME_MAX_DATA_LEN)
+#define CAN_FRAME_MAX_BITS	128
+
+#define TX_ECHO_SKB_MAX	1
+
+#define DEVICE_NAME "mcp251x"
+
+static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
+module_param(mcp251x_enable_dma, int, S_IRUGO);
+MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
+
+static struct can_bittiming_const mcp251x_bittiming_const = {
+	.name = DEVICE_NAME,
+	.tseg1_min = 3,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+
+struct mcp251x_priv {
+	struct can_priv	   can;
+	struct net_device *net;
+	struct spi_device *spi;
+
+	struct mutex spi_lock; /* SPI buffer lock */
+	u8 *spi_tx_buf;
+	u8 *spi_rx_buf;
+	dma_addr_t spi_tx_dma;
+	dma_addr_t spi_rx_dma;
+
+	struct sk_buff *tx_skb;
+	int tx_len;
+	struct workqueue_struct *wq;
+	struct work_struct tx_work;
+	struct work_struct irq_work;
+	struct completion awake;
+	int wake;
+	int force_quit;
+	int after_suspend;
+#define AFTER_SUSPEND_UP 1
+#define AFTER_SUSPEND_DOWN 2
+#define AFTER_SUSPEND_POWER 4
+#define AFTER_SUSPEND_RESTART 8
+	int restart_tx;
+};
+
+static void mcp251x_clean(struct net_device *net)
+{
+	struct mcp251x_priv *priv = netdev_priv(net);
+
+	net->stats.tx_errors++;
+	if (priv->tx_skb)
+		dev_kfree_skb(priv->tx_skb);
+	if (priv->tx_len)
+		can_free_echo_skb(priv->net, 0);
+	priv->tx_skb = NULL;
+	priv->tx_len = 0;
+}
+
+/*
+ * Note about handling the error return of mcp251x_spi_trans: accessing
+ * registers via SPI is conceptually no different from using normal I/O
+ * assembler instructions, although it is much more complicated from a
+ * practical point of view. It is therefore not advisable to check the
+ * return value of this function everywhere: if every read{b,l},
+ * write{b,l} and friends were bracketed in "if ( < 0) error();", it
+ * would be a great mess (well, there are some situations where
+ * C++-style exception handling could be useful after all). So we just
+ * check that transfers are OK at the beginning of our conversation with
+ * the chip and otherwise avoid doing really nasty things (like
+ * injecting bogus packets into the network stack).
+ */
+static int mcp251x_spi_trans(struct spi_device *spi, int len)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct spi_transfer t = {
+		.tx_buf = priv->spi_tx_buf,
+		.rx_buf = priv->spi_rx_buf,
+		.len = len,
+		.cs_change = 0,
+	};
+	struct spi_message m;
+	int ret;
+
+	spi_message_init(&m);
+
+	if (mcp251x_enable_dma) {
+		t.tx_dma = priv->spi_tx_dma;
+		t.rx_dma = priv->spi_rx_dma;
+		m.is_dma_mapped = 1;
+	}
+
+	spi_message_add_tail(&t, &m);
+
+	ret = spi_sync(spi, &m);
+	if (ret)
+		dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
+	return ret;
+}
+
+static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	u8 val = 0;
+
+	mutex_lock(&priv->spi_lock);
+
+	priv->spi_tx_buf[0] = INSTRUCTION_READ;
+	priv->spi_tx_buf[1] = reg;
+
+	mcp251x_spi_trans(spi, 3);
+	val = priv->spi_rx_buf[2];
+
+	mutex_unlock(&priv->spi_lock);
+
+	return val;
+}
+
+static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+
+	mutex_lock(&priv->spi_lock);
+
+	priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
+	priv->spi_tx_buf[1] = reg;
+	priv->spi_tx_buf[2] = val;
+
+	mcp251x_spi_trans(spi, 3);
+
+	mutex_unlock(&priv->spi_lock);
+}
+
+static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
+			       u8 mask, uint8_t val)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+
+	mutex_lock(&priv->spi_lock);
+
+	priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
+	priv->spi_tx_buf[1] = reg;
+	priv->spi_tx_buf[2] = mask;
+	priv->spi_tx_buf[3] = val;
+
+	mcp251x_spi_trans(spi, 4);
+
+	mutex_unlock(&priv->spi_lock);
+}
+
+static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
+				int len, int tx_buf_idx)
+{
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+
+	if (pdata->model == CAN_MCP251X_MCP2510) {
+		int i;
+
+		for (i = 1; i < TXBDAT_OFF + len; i++)
+			mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
+					  buf[i]);
+	} else {
+		mutex_lock(&priv->spi_lock);
+		memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
+		mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+		mutex_unlock(&priv->spi_lock);
+	}
+}
+
+static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+			  int tx_buf_idx)
+{
+	u32 sid, eid, exide, rtr;
+	u8 buf[SPI_TRANSFER_BUF_LEN];
+
+	exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
+	if (exide)
+		sid = (frame->can_id & CAN_EFF_MASK) >> 18;
+	else
+		sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
+	eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
+	rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
+
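+	/*
+	 * Layout note (illustrative example only): for an extended frame
+	 * the 29-bit identifier is split as the controller expects it -
+	 * the upper 11 bits go into SIDH/SIDL as the "standard" part and
+	 * the lower 18 bits into SIDL[1:0], EID8 and EID0. E.g. can_id
+	 * 0x1abcdef5 gives sid 0x6af, EID8 0xde and EID0 0xf5.
+	 */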
+	buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
+	buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
+	buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
+		(exide << SIDL_EXIDE_SHIFT) |
+		((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
+	buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
+	buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
+	buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
+	memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
+	mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
+	mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
+}
+
+static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
+				int buf_idx)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+
+	if (pdata->model == CAN_MCP251X_MCP2510) {
+		int i, len;
+
+		for (i = 1; i < RXBDAT_OFF; i++)
+			buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
+		len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
+		if (len > 8)
+			len = 8;
+		for (; i < (RXBDAT_OFF + len); i++)
+			buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
+	} else {
+		mutex_lock(&priv->spi_lock);
+
+		priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
+		mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
+		memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+
+		mutex_unlock(&priv->spi_lock);
+	}
+}
+
+static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct sk_buff *skb;
+	struct can_frame *frame;
+	u8 buf[SPI_TRANSFER_BUF_LEN];
+
+	skb = alloc_can_skb(priv->net, &frame);
+	if (!skb) {
+		dev_err(&spi->dev, "cannot allocate RX skb\n");
+		priv->net->stats.rx_dropped++;
+		return;
+	}
+
+	mcp251x_hw_rx_frame(spi, buf, buf_idx);
+	if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
+		/* Extended ID format */
+		frame->can_id = CAN_EFF_FLAG;
+		frame->can_id |=
+			/* Extended ID part */
+			SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
+			SET_BYTE(buf[RXBEID8_OFF], 1) |
+			SET_BYTE(buf[RXBEID0_OFF], 0) |
+			/* Standard ID part */
+			(((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
+			  (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
+		/* Remote transmission request */
+		if (buf[RXBDLC_OFF] & RXBDLC_RTR)
+			frame->can_id |= CAN_RTR_FLAG;
+	} else {
+		/* Standard ID format */
+		frame->can_id =
+			(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
+			(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+	}
+	/* Data length */
+	frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
+	if (frame->can_dlc > 8) {
+		dev_warn(&spi->dev, "invalid frame received\n");
+		priv->net->stats.rx_errors++;
+		dev_kfree_skb(skb);
+		return;
+	}
+	memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
+
+	priv->net->stats.rx_packets++;
+	priv->net->stats.rx_bytes += frame->can_dlc;
+	netif_rx(skb);
+}
+
+static void mcp251x_hw_sleep(struct spi_device *spi)
+{
+	mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
+}
+
+static void mcp251x_hw_wakeup(struct spi_device *spi)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+
+	priv->wake = 1;
+
+	/* Can only wake up by generating a wake-up interrupt. */
+	mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
+	mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
+
+	/* Wait until the device is awake */
+	if (!wait_for_completion_timeout(&priv->awake, HZ))
+		dev_err(&spi->dev, "MCP251x didn't wake up\n");
+}
+
+static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
+					   struct net_device *net)
+{
+	struct mcp251x_priv *priv = netdev_priv(net);
+	struct spi_device *spi = priv->spi;
+
+	if (priv->tx_skb || priv->tx_len) {
+		dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
+		netif_stop_queue(net);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb->len != sizeof(struct can_frame)) {
+		dev_err(&spi->dev, "dropping packet - bad length\n");
+		dev_kfree_skb(skb);
+		net->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	netif_stop_queue(net);
+	priv->tx_skb = skb;
+	net->trans_start = jiffies;
+	queue_work(priv->wq, &priv->tx_work);
+
+	return NETDEV_TX_OK;
+}
+
+static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
+{
+	struct mcp251x_priv *priv = netdev_priv(net);
+
+	switch (mode) {
+	case CAN_MODE_START:
+		/* We have to delay work since SPI I/O may sleep */
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		priv->restart_tx = 1;
+		if (priv->can.restart_ms == 0)
+			priv->after_suspend = AFTER_SUSPEND_RESTART;
+		queue_work(priv->wq, &priv->irq_work);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static void mcp251x_set_normal_mode(struct spi_device *spi)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	unsigned long timeout;
+
+	/* Enable interrupts */
+	mcp251x_write_reg(spi, CANINTE,
+			  CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
+			  CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
+			  CANINTE_MERRE);
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+		/* Put device into loopback mode */
+		mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+	} else {
+		/* Put device into normal mode */
+		mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
+
+		/* Wait for the device to enter normal mode */
+		timeout = jiffies + HZ;
+		while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
+			schedule();
+			if (time_after(jiffies, timeout)) {
+				dev_err(&spi->dev, "MCP251x didn't"
+					" enter normal mode\n");
+				return;
+			}
+		}
+	}
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static int mcp251x_do_set_bittiming(struct net_device *net)
+{
+	struct mcp251x_priv *priv = netdev_priv(net);
+	struct can_bittiming *bt = &priv->can.bittiming;
+	struct spi_device *spi = priv->spi;
+
+	mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
+			  (bt->brp - 1));
+	mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
+			  (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
+			   CNF2_SAM : 0) |
+			  ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
+			  (bt->prop_seg - 1));
+	mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
+			   (bt->phase_seg2 - 1));
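+	/*
+	 * Purely illustrative example, assuming hypothetical bit-timing
+	 * values from the CAN core (sjw=1, brp=4, prop_seg=2,
+	 * phase_seg1=7, phase_seg2=6, single sampling):
+	 *   CNF1 = (0 << 6) | 3        = 0x03
+	 *   CNF2 = 0x80 | (6 << 3) | 1 = 0xb1
+	 *   CNF3 bits 2..0             = 5
+	 */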
+	dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
+		 mcp251x_read_reg(spi, CNF1),
+		 mcp251x_read_reg(spi, CNF2),
+		 mcp251x_read_reg(spi, CNF3));
+
+	return 0;
+}
+
+static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
+			 struct spi_device *spi)
+{
+	mcp251x_do_set_bittiming(net);
+
+	/* Enable RX0->RX1 buffer roll over and disable filters */
+	mcp251x_write_bits(spi, RXBCTRL(0),
+			   RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
+			   RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+	mcp251x_write_bits(spi, RXBCTRL(1),
+			   RXBCTRL_RXM0 | RXBCTRL_RXM1,
+			   RXBCTRL_RXM0 | RXBCTRL_RXM1);
+	return 0;
+}
+
+static void mcp251x_hw_reset(struct spi_device *spi)
+{
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	int ret;
+
+	mutex_lock(&priv->spi_lock);
+
+	priv->spi_tx_buf[0] = INSTRUCTION_RESET;
+
+	ret = spi_write(spi, priv->spi_tx_buf, 1);
+
+	mutex_unlock(&priv->spi_lock);
+
+	if (ret)
+		dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
+	/* Wait for reset to finish */
+	mdelay(10);
+}
+
+static int mcp251x_hw_probe(struct spi_device *spi)
+{
+	int st1, st2;
+
+	mcp251x_hw_reset(spi);
+
+	/*
+	 * Please note that these are "magic values" based on the
+	 * after-reset defaults taken from the data sheet, which let us
+	 * check that a chip is really present on the bus (avoiding the
+	 * common all-zeroes or all-ones situations).
+	 */
+	st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
+	st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
+
+	dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
+
+	/* Check for power up default values */
+	return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
+}
+
+static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
+{
+	struct net_device *net = (struct net_device *)dev_id;
+	struct mcp251x_priv *priv = netdev_priv(net);
+
+	/* Schedule bottom half */
+	if (!work_pending(&priv->irq_work))
+		queue_work(priv->wq, &priv->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static int mcp251x_open(struct net_device *net)
+{
+	struct mcp251x_priv *priv = netdev_priv(net);
+	struct spi_device *spi = priv->spi;
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	int ret;
+
+	ret = open_candev(net);
+	if (ret) {
+		dev_err(&spi->dev, "unable to set initial baudrate!\n");
+		return ret;
+	}
+
+	if (pdata->transceiver_enable)
+		pdata->transceiver_enable(1);
+
+	priv->force_quit = 0;
+	priv->tx_skb = NULL;
+	priv->tx_len = 0;
+
+	ret = request_irq(spi->irq, mcp251x_can_isr,
+			  IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
+	if (ret) {
+		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+		if (pdata->transceiver_enable)
+			pdata->transceiver_enable(0);
+		close_candev(net);
+		return ret;
+	}
+
+	mcp251x_hw_wakeup(spi);
+	mcp251x_hw_reset(spi);
+	ret = mcp251x_setup(net, priv, spi);
+	if (ret) {
+		free_irq(spi->irq, net);
+		mcp251x_hw_sleep(spi);
+		if (pdata->transceiver_enable)
+			pdata->transceiver_enable(0);
+		close_candev(net);
+		return ret;
+	}
+	mcp251x_set_normal_mode(spi);
+	netif_wake_queue(net);
+
+	return 0;
+}
+
+static int mcp251x_stop(struct net_device *net)
+{
+	struct mcp251x_priv *priv = netdev_priv(net);
+	struct spi_device *spi = priv->spi;
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+
+	close_candev(net);
+
+	/* Disable and clear pending interrupts */
+	mcp251x_write_reg(spi, CANINTE, 0x00);
+	mcp251x_write_reg(spi, CANINTF, 0x00);
+
+	priv->force_quit = 1;
+	free_irq(spi->irq, net);
+	flush_workqueue(priv->wq);
+
+	mcp251x_write_reg(spi, TXBCTRL(0), 0);
+	if (priv->tx_skb || priv->tx_len)
+		mcp251x_clean(net);
+
+	mcp251x_hw_sleep(spi);
+
+	if (pdata->transceiver_enable)
+		pdata->transceiver_enable(0);
+
+	priv->can.state = CAN_STATE_STOPPED;
+
+	return 0;
+}
+
+static void mcp251x_tx_work_handler(struct work_struct *ws)
+{
+	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
+						 tx_work);
+	struct spi_device *spi = priv->spi;
+	struct net_device *net = priv->net;
+	struct can_frame *frame;
+
+	if (priv->tx_skb) {
+		frame = (struct can_frame *)priv->tx_skb->data;
+
+		if (priv->can.state == CAN_STATE_BUS_OFF) {
+			mcp251x_clean(net);
+			netif_wake_queue(net);
+			return;
+		}
+		if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+			frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+		mcp251x_hw_tx(spi, frame, 0);
+		priv->tx_len = 1 + frame->can_dlc;
+		can_put_echo_skb(priv->tx_skb, net, 0);
+		priv->tx_skb = NULL;
+	}
+}
+
+static void mcp251x_irq_work_handler(struct work_struct *ws)
+{
+	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
+						 irq_work);
+	struct spi_device *spi = priv->spi;
+	struct net_device *net = priv->net;
+	u8 txbnctrl;
+	u8 intf;
+	enum can_state new_state;
+
+	if (priv->after_suspend) {
+		mdelay(10);
+		mcp251x_hw_reset(spi);
+		mcp251x_setup(net, priv, spi);
+		if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
+			mcp251x_set_normal_mode(spi);
+		} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
+			netif_device_attach(net);
+			/* Clean since we lost tx buffer */
+			if (priv->tx_skb || priv->tx_len) {
+				mcp251x_clean(net);
+				netif_wake_queue(net);
+			}
+			mcp251x_set_normal_mode(spi);
+		} else {
+			mcp251x_hw_sleep(spi);
+		}
+		priv->after_suspend = 0;
+	}
+
+	if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
+		return;
+
+	while (!priv->force_quit && !freezing(current)) {
+		u8 eflag = mcp251x_read_reg(spi, EFLG);
+		int can_id = 0, data1 = 0;
+
+		mcp251x_write_reg(spi, EFLG, 0x00);
+
+		if (priv->restart_tx) {
+			priv->restart_tx = 0;
+			mcp251x_write_reg(spi, TXBCTRL(0), 0);
+			if (priv->tx_skb || priv->tx_len)
+				mcp251x_clean(net);
+			netif_wake_queue(net);
+			can_id |= CAN_ERR_RESTARTED;
+		}
+
+		if (priv->wake) {
+			/* Wait whilst the device wakes up */
+			mdelay(10);
+			priv->wake = 0;
+		}
+
+		intf = mcp251x_read_reg(spi, CANINTF);
+		mcp251x_write_bits(spi, CANINTF, intf, 0x00);
+
+		/* Update can state */
+		if (eflag & EFLG_TXBO) {
+			new_state = CAN_STATE_BUS_OFF;
+			can_id |= CAN_ERR_BUSOFF;
+		} else if (eflag & EFLG_TXEP) {
+			new_state = CAN_STATE_ERROR_PASSIVE;
+			can_id |= CAN_ERR_CRTL;
+			data1 |= CAN_ERR_CRTL_TX_PASSIVE;
+		} else if (eflag & EFLG_RXEP) {
+			new_state = CAN_STATE_ERROR_PASSIVE;
+			can_id |= CAN_ERR_CRTL;
+			data1 |= CAN_ERR_CRTL_RX_PASSIVE;
+		} else if (eflag & EFLG_TXWAR) {
+			new_state = CAN_STATE_ERROR_WARNING;
+			can_id |= CAN_ERR_CRTL;
+			data1 |= CAN_ERR_CRTL_TX_WARNING;
+		} else if (eflag & EFLG_RXWAR) {
+			new_state = CAN_STATE_ERROR_WARNING;
+			can_id |= CAN_ERR_CRTL;
+			data1 |= CAN_ERR_CRTL_RX_WARNING;
+		} else {
+			new_state = CAN_STATE_ERROR_ACTIVE;
+		}
+
+		/* Update can state statistics */
+		switch (priv->can.state) {
+		case CAN_STATE_ERROR_ACTIVE:
+			if (new_state >= CAN_STATE_ERROR_WARNING &&
+			    new_state <= CAN_STATE_BUS_OFF)
+				priv->can.can_stats.error_warning++;
+		case CAN_STATE_ERROR_WARNING:	/* fallthrough */
+			if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+			    new_state <= CAN_STATE_BUS_OFF)
+				priv->can.can_stats.error_passive++;
+			break;
+		default:
+			break;
+		}
+		priv->can.state = new_state;
+
+		if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
+			struct sk_buff *skb;
+			struct can_frame *frame;
+
+			/* Create error frame */
+			skb = alloc_can_err_skb(net, &frame);
+			if (skb) {
+				/* Set error frame flags based on bus state */
+				frame->can_id = can_id;
+				frame->data[1] = data1;
+
+				/* Update net stats for overflows */
+				if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+					if (eflag & EFLG_RX0OVR)
+						net->stats.rx_over_errors++;
+					if (eflag & EFLG_RX1OVR)
+						net->stats.rx_over_errors++;
+					frame->can_id |= CAN_ERR_CRTL;
+					frame->data[1] |=
+						CAN_ERR_CRTL_RX_OVERFLOW;
+				}
+
+				netif_rx(skb);
+			} else {
+				dev_info(&spi->dev,
+					 "cannot allocate error skb\n");
+			}
+		}
+
+		if (priv->can.state == CAN_STATE_BUS_OFF) {
+			if (priv->can.restart_ms == 0) {
+				can_bus_off(net);
+				mcp251x_hw_sleep(spi);
+				return;
+			}
+		}
+
+		if (intf == 0)
+			break;
+
+		if (intf & CANINTF_WAKIF)
+			complete(&priv->awake);
+
+		if (intf & CANINTF_MERRF) {
+			/* If there are pending Tx buffers, restart queue */
+			txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
+			if (!(txbnctrl & TXBCTRL_TXREQ)) {
+				if (priv->tx_skb || priv->tx_len)
+					mcp251x_clean(net);
+				netif_wake_queue(net);
+			}
+		}
+
+		if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
+			if (priv->tx_len) {
+				net->stats.tx_packets++;
+				net->stats.tx_bytes += priv->tx_len - 1;
+				can_get_echo_skb(net, 0);
+				priv->tx_len = 0;
+			}
+			netif_wake_queue(net);
+		}
+
+		if (intf & CANINTF_RX0IF)
+			mcp251x_hw_rx(spi, 0);
+
+		if (intf & CANINTF_RX1IF)
+			mcp251x_hw_rx(spi, 1);
+	}
+}
+
+static const struct net_device_ops mcp251x_netdev_ops = {
+	.ndo_open = mcp251x_open,
+	.ndo_stop = mcp251x_stop,
+	.ndo_start_xmit = mcp251x_hard_start_xmit,
+};
+
+static int __devinit mcp251x_can_probe(struct spi_device *spi)
+{
+	struct net_device *net;
+	struct mcp251x_priv *priv;
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	int ret = -ENODEV;
+
+	if (!pdata)
+		/* Platform data is required for osc freq */
+		goto error_out;
+
+	/* Allocate can/net device */
+	net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
+	if (!net) {
+		ret = -ENOMEM;
+		goto error_alloc;
+	}
+
+	net->netdev_ops = &mcp251x_netdev_ops;
+	net->flags |= IFF_ECHO;
+
+	priv = netdev_priv(net);
+	priv->can.bittiming_const = &mcp251x_bittiming_const;
+	priv->can.do_set_mode = mcp251x_do_set_mode;
+	priv->can.clock.freq = pdata->oscillator_frequency / 2;
+	priv->net = net;
+	dev_set_drvdata(&spi->dev, priv);
+
+	priv->spi = spi;
+	mutex_init(&priv->spi_lock);
+
+	/* If requested, allocate DMA buffers */
+	if (mcp251x_enable_dma) {
+		spi->dev.coherent_dma_mask = ~0;
+
+		/*
+		 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
+		 * that much and share it between Tx and Rx DMA buffers.
+		 */
+		priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
+						      PAGE_SIZE,
+						      &priv->spi_tx_dma,
+						      GFP_DMA);
+
+		if (priv->spi_tx_buf) {
+			priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
+						  (PAGE_SIZE / 2));
+			priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
+							(PAGE_SIZE / 2));
+		} else {
+			/* Fall back to non-DMA */
+			mcp251x_enable_dma = 0;
+		}
+	}
+
+	/* Allocate non-DMA buffers */
+	if (!mcp251x_enable_dma) {
+		priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+		if (!priv->spi_tx_buf) {
+			ret = -ENOMEM;
+			goto error_tx_buf;
+		}
+		priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+		if (!priv->spi_rx_buf) {
+			ret = -ENOMEM;
+			goto error_rx_buf;
+		}
+	}
+
+	if (pdata->power_enable)
+		pdata->power_enable(1);
+
+	/* Call out to platform specific setup */
+	if (pdata->board_specific_setup)
+		pdata->board_specific_setup(spi);
+
+	SET_NETDEV_DEV(net, &spi->dev);
+
+	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+
+	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+	INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
+
+	init_completion(&priv->awake);
+
+	/* Configure the SPI bus */
+	spi->mode = SPI_MODE_0;
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+
+	if (!mcp251x_hw_probe(spi)) {
+		dev_info(&spi->dev, "Probe failed\n");
+		goto error_probe;
+	}
+	mcp251x_hw_sleep(spi);
+
+	if (pdata->transceiver_enable)
+		pdata->transceiver_enable(0);
+
+	ret = register_candev(net);
+	if (!ret) {
+		dev_info(&spi->dev, "probed\n");
+		return ret;
+	}
+error_probe:
+	if (!mcp251x_enable_dma)
+		kfree(priv->spi_rx_buf);
+error_rx_buf:
+	if (!mcp251x_enable_dma)
+		kfree(priv->spi_tx_buf);
+error_tx_buf:
+	free_candev(net);
+	if (mcp251x_enable_dma)
+		dma_free_coherent(&spi->dev, PAGE_SIZE,
+				  priv->spi_tx_buf, priv->spi_tx_dma);
+error_alloc:
+	if (pdata->power_enable)
+		pdata->power_enable(0);
+	dev_err(&spi->dev, "probe failed\n");
+error_out:
+	return ret;
+}
+
+static int __devexit mcp251x_can_remove(struct spi_device *spi)
+{
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct net_device *net = priv->net;
+
+	unregister_candev(net);
+	free_candev(net);
+
+	priv->force_quit = 1;
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+
+	if (mcp251x_enable_dma) {
+		dma_free_coherent(&spi->dev, PAGE_SIZE,
+				  priv->spi_tx_buf, priv->spi_tx_dma);
+	} else {
+		kfree(priv->spi_tx_buf);
+		kfree(priv->spi_rx_buf);
+	}
+
+	if (pdata->power_enable)
+		pdata->power_enable(0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
+{
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct net_device *net = priv->net;
+
+	if (netif_running(net)) {
+		netif_device_detach(net);
+
+		mcp251x_hw_sleep(spi);
+		if (pdata->transceiver_enable)
+			pdata->transceiver_enable(0);
+		priv->after_suspend = AFTER_SUSPEND_UP;
+	} else {
+		priv->after_suspend = AFTER_SUSPEND_DOWN;
+	}
+
+	if (pdata->power_enable) {
+		pdata->power_enable(0);
+		priv->after_suspend |= AFTER_SUSPEND_POWER;
+	}
+
+	return 0;
+}
+
+static int mcp251x_can_resume(struct spi_device *spi)
+{
+	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+
+	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+		pdata->power_enable(1);
+		queue_work(priv->wq, &priv->irq_work);
+	} else {
+		if (priv->after_suspend & AFTER_SUSPEND_UP) {
+			if (pdata->transceiver_enable)
+				pdata->transceiver_enable(1);
+			queue_work(priv->wq, &priv->irq_work);
+		} else {
+			priv->after_suspend = 0;
+		}
+	}
+	return 0;
+}
+#else
+#define mcp251x_can_suspend NULL
+#define mcp251x_can_resume NULL
+#endif
+
+static struct spi_driver mcp251x_can_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+
+	.probe = mcp251x_can_probe,
+	.remove = __devexit_p(mcp251x_can_remove),
+	.suspend = mcp251x_can_suspend,
+	.resume = mcp251x_can_resume,
+};
+
+static int __init mcp251x_can_init(void)
+{
+	return spi_register_driver(&mcp251x_can_driver);
+}
+
+static void __exit mcp251x_can_exit(void)
+{
+	spi_unregister_driver(&mcp251x_can_driver);
+}
+
+module_init(mcp251x_can_init);
+module_exit(mcp251x_can_exit);
+
+MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
+	      "Christian Pellegrin <chripell@evolware.org>");
+MODULE_DESCRIPTION("Microchip 251x CAN driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
new file mode 100644
index 000000000000..cd0f2d6f375d
--- /dev/null
+++ b/drivers/net/can/mscan/Kconfig
@@ -0,0 +1,23 @@
+config CAN_MSCAN
+	depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+	tristate "Support for Freescale MSCAN based chips"
+	---help---
+	  The Motorola Scalable Controller Area Network (MSCAN) definition
+	  is based on the MSCAN12 definition which is the specific
+	  implementation of the Motorola Scalable CAN concept targeted for
+	  the Motorola MC68HC12 Microcontroller Family.
+
+if CAN_MSCAN
+
+config CAN_MPC5XXX
+	tristate "Freescale MPC5xxx onboard CAN controller"
+	depends on PPC_MPC52xx
+	---help---
+	  If you say yes here you get support for Freescale's MPC5xxx
+	  onboard CAN controller.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called mscan-mpc5xxx.ko.
+
+endif
+
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 000000000000..c9fab17cd8b4
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_CAN_MPC5XXX)	+= mscan-mpc5xxx.o
+mscan-mpc5xxx-objs		:= mscan.o mpc5xxx_can.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
new file mode 100644
index 000000000000..1de6f6349b16
--- /dev/null
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -0,0 +1,259 @@
+/*
+ * CAN bus driver for the Freescale MPC5xxx embedded CPU.
+ *
+ * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
+ *                         Varma Electronics Oy
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/io.h>
+#include <asm/mpc52xx.h>
+
+#include "mscan.h"
+
+#define DRV_NAME "mpc5xxx_can"
+
+static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
+	{ .compatible = "fsl,mpc5200-cdm", },
+	{}
+};
+
+/*
+ * Get frequency of the MSCAN clock source
+ *
+ * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
+ * can be selected. According to the MPC5200 user's manual, the oscillator
+ * clock is the better choice as it has less jitter but due to a hardware
+ * bug, it can not be selected for the old MPC5200 Rev. A chips.
+ */
+
+static unsigned int  __devinit mpc52xx_can_clock_freq(struct of_device *of,
+						      int clock_src)
+{
+	unsigned int pvr;
+	struct mpc52xx_cdm  __iomem *cdm;
+	struct device_node *np_cdm;
+	unsigned int freq;
+	u32 val;
+
+	pvr = mfspr(SPRN_PVR);
+
+	freq = mpc5xxx_get_bus_frequency(of->node);
+	if (!freq)
+		return 0;
+
+	if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+		return freq;
+
+	/* Determine SYS_XTAL_IN frequency from the clock domain settings */
+	np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
+	if (!np_cdm) {
+		dev_err(&of->dev, "can't get clock node!\n");
+		return 0;
+	}
+	cdm = of_iomap(np_cdm, 0);
+	of_node_put(np_cdm);
+
+	if (in_8(&cdm->ipb_clk_sel) & 0x1)
+		freq *= 2;
+	val = in_be32(&cdm->rstcfg);
+
+	freq *= (val & (1 << 5)) ? 8 : 4;
+	freq /= (val & (1 << 6)) ? 12 : 16;
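+	/*
+	 * Illustrative example only (hypothetical register contents):
+	 * with freq = 33000000 after the optional doubling above, rstcfg
+	 * bit 5 set and bit 6 clear, this computes
+	 * 33000000 * 8 / 16 = 16500000 Hz for SYS_XTAL_IN.
+	 */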
+
+	iounmap(cdm);
+
+	return freq;
+}
+
+static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
+				       const struct of_device_id *id)
+{
+	struct device_node *np = ofdev->node;
+	struct net_device *dev;
+	struct mscan_priv *priv;
+	void __iomem *base;
+	const char *clk_src;
+	int err, irq, clock_src;
+
+	base = of_iomap(ofdev->node, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "couldn't ioremap\n");
+		err = -ENOMEM;
+		goto exit_release_mem;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		dev_err(&ofdev->dev, "no irq found\n");
+		err = -ENODEV;
+		goto exit_unmap_mem;
+	}
+
+	dev = alloc_mscandev();
+	if (!dev) {
+		err = -ENOMEM;
+		goto exit_dispose_irq;
+	}
+
+	priv = netdev_priv(dev);
+	priv->reg_base = base;
+	dev->irq = irq;
+
+	/*
+	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+	 * (IP_CLK) can be selected as MSCAN clock source. According to
+	 * the MPC5200 user's manual, the oscillator clock is the better
+	 * choice as it has less jitter. For this reason, it is selected
+	 * by default.
+	 */
+	clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
+	if (clk_src && strcmp(clk_src, "ip") == 0)
+		clock_src = MSCAN_CLKSRC_BUS;
+	else
+		clock_src = MSCAN_CLKSRC_XTAL;
+	priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
+	if (!priv->can.clock.freq) {
+		dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
+		err = -ENODEV;
+		goto exit_free_mscan;
+	}
+
+	SET_NETDEV_DEV(dev, &ofdev->dev);
+
+	err = register_mscandev(dev, clock_src);
+	if (err) {
+		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
+			DRV_NAME, err);
+		goto exit_free_mscan;
+	}
+
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
+		 priv->reg_base, dev->irq, priv->can.clock.freq);
+
+	return 0;
+
+exit_free_mscan:
+	free_candev(dev);
+exit_dispose_irq:
+	irq_dispose_mapping(irq);
+exit_unmap_mem:
+	iounmap(base);
+exit_release_mem:
+	return err;
+}
+
+static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
+{
+	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+	struct mscan_priv *priv = netdev_priv(dev);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	unregister_mscandev(dev);
+	iounmap(priv->reg_base);
+	irq_dispose_mapping(dev->irq);
+	free_candev(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static struct mscan_regs saved_regs;
+static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
+{
+	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+	_memcpy_fromio(&saved_regs, regs, sizeof(*regs));
+
+	return 0;
+}
+
+static int mpc5xxx_can_resume(struct of_device *ofdev)
+{
+	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+	regs->canctl0 |= MSCAN_INITRQ;
+	while (!(regs->canctl1 & MSCAN_INITAK))
+		udelay(10);
+
+	regs->canctl1 = saved_regs.canctl1;
+	regs->canbtr0 = saved_regs.canbtr0;
+	regs->canbtr1 = saved_regs.canbtr1;
+	regs->canidac = saved_regs.canidac;
+
+	/* restore masks, buffers etc. */
+	_memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
+		     sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));
+
+	regs->canctl0 &= ~MSCAN_INITRQ;
+	regs->cantbsel = saved_regs.cantbsel;
+	regs->canrier = saved_regs.canrier;
+	regs->cantier = saved_regs.cantier;
+	regs->canctl0 = saved_regs.canctl0;
+
+	return 0;
+}
+#endif
+
+static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
+	{.compatible = "fsl,mpc5200-mscan"},
+	{},
+};
+
+static struct of_platform_driver mpc5xxx_can_driver = {
+	.owner = THIS_MODULE,
+	.name = "mpc5xxx_can",
+	.probe = mpc5xxx_can_probe,
+	.remove = __devexit_p(mpc5xxx_can_remove),
+#ifdef CONFIG_PM
+	.suspend = mpc5xxx_can_suspend,
+	.resume = mpc5xxx_can_resume,
+#endif
+	.match_table = mpc5xxx_can_table,
+};
+
+static int __init mpc5xxx_can_init(void)
+{
+	return of_register_platform_driver(&mpc5xxx_can_driver);
+}
+module_init(mpc5xxx_can_init);
+
+static void __exit mpc5xxx_can_exit(void)
+{
+	of_unregister_platform_driver(&mpc5xxx_can_driver);
+}
+module_exit(mpc5xxx_can_exit);
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 000000000000..bb06dfb58f25
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,668 @@
+/*
+ * CAN bus driver for the stand-alone (as generic as possible) MSCAN controller.
+ *
+ * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
+ *                         Varma Electronics Oy
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/io.h>
+
+#include "mscan.h"
+
+static struct can_bittiming_const mscan_bittiming_const = {
+	.name = "mscan",
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+
+struct mscan_state {
+	u8 mode;
+	u8 canrier;
+	u8 cantier;
+};
+
+static enum can_state state_map[] = {
+	CAN_STATE_ERROR_ACTIVE,
+	CAN_STATE_ERROR_WARNING,
+	CAN_STATE_ERROR_PASSIVE,
+	CAN_STATE_BUS_OFF
+};
+
+static int mscan_set_mode(struct net_device *dev, u8 mode)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	int ret = 0;
+	int i;
+	u8 canctl1;
+
+	if (mode != MSCAN_NORMAL_MODE) {
+		if (priv->tx_active) {
+			/* Abort transfers before going to sleep */
+			out_8(&regs->cantarq, priv->tx_active);
+			/* Suppress TX done interrupts */
+			out_8(&regs->cantier, 0);
+		}
+
+		canctl1 = in_8(&regs->canctl1);
+		if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
+			setbits8(&regs->canctl0, MSCAN_SLPRQ);
+			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
+				if (in_8(&regs->canctl1) & MSCAN_SLPAK)
+					break;
+				udelay(100);
+			}
+			/*
+			 * The MSCAN controller will fail to enter sleep
+			 * mode while there is irregular activity on the
+			 * bus, e.g. somebody keeps retransmitting. This
+			 * behaviour is undocumented and seems to differ
+			 * between the MSCAN built into the MPC5200B and
+			 * the MPC5200. We proceed in that case, since
+			 * otherwise SLPRQ would be kept set and the
+			 * controller would get stuck. NOTE: INITRQ or
+			 * CSWAI will abort all active transmit actions,
+			 * if any, at once.
+			 */
+			if (i >= MSCAN_SET_MODE_RETRIES)
+				dev_dbg(dev->dev.parent,
+					"device failed to enter sleep mode. "
+					"We proceed anyhow.\n");
+			else
+				priv->can.state = CAN_STATE_SLEEPING;
+		}
+
+		if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
+			setbits8(&regs->canctl0, MSCAN_INITRQ);
+			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
+				if (in_8(&regs->canctl1) & MSCAN_INITAK)
+					break;
+			}
+			if (i >= MSCAN_SET_MODE_RETRIES)
+				ret = -ENODEV;
+		}
+		if (!ret)
+			priv->can.state = CAN_STATE_STOPPED;
+
+		if (mode & MSCAN_CSWAI)
+			setbits8(&regs->canctl0, MSCAN_CSWAI);
+
+	} else {
+		canctl1 = in_8(&regs->canctl1);
+		if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
+			clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
+				canctl1 = in_8(&regs->canctl1);
+				if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
+					break;
+			}
+			if (i >= MSCAN_SET_MODE_RETRIES)
+				ret = -ENODEV;
+			else
+				priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		}
+	}
+	return ret;
+}
+
+static int mscan_start(struct net_device *dev)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	u8 canrflg;
+	int err;
+
+	out_8(&regs->canrier, 0);
+
+	INIT_LIST_HEAD(&priv->tx_head);
+	priv->prev_buf_id = 0;
+	priv->cur_pri = 0;
+	priv->tx_active = 0;
+	priv->shadow_canrier = 0;
+	priv->flags = 0;
+
+	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
+	if (err)
+		return err;
+
+	canrflg = in_8(&regs->canrflg);
+	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
+	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
+				    MSCAN_STATE_TX(canrflg))];
+	out_8(&regs->cantier, 0);
+
+	/* Enable receive interrupts. */
+	out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
+	      MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
+
+	return 0;
+}
+
+static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct can_frame *frame = (struct can_frame *)skb->data;
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	int i, rtr, buf_id;
+	u32 can_id;
+
+	if (frame->can_dlc > 8)
+		return -EINVAL;
+
+	out_8(&regs->cantier, 0);
+
+	i = ~priv->tx_active & MSCAN_TXE;
+	buf_id = ffs(i) - 1;
+	switch (hweight8(i)) {
+	case 0:
+		netif_stop_queue(dev);
+		dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
+		return NETDEV_TX_BUSY;
+	case 1:
+		/*
+		 * If buf_id < 3, the current frame will be sent out of order,
+		 * since buffers with a lower id have a higher priority (hell..)
+		 */
+		netif_stop_queue(dev);
+	case 2:
+		if (buf_id < priv->prev_buf_id) {
+			priv->cur_pri++;
+			if (priv->cur_pri == 0xff) {
+				set_bit(F_TX_WAIT_ALL, &priv->flags);
+				netif_stop_queue(dev);
+			}
+		}
+		set_bit(F_TX_PROGRESS, &priv->flags);
+		break;
+	}
+	priv->prev_buf_id = buf_id;
+	out_8(&regs->cantbsel, i);
+
+	rtr = frame->can_id & CAN_RTR_FLAG;
+
+	/* RTR is always the lowest bit of interest, then IDs follow */
+	if (frame->can_id & CAN_EFF_FLAG) {
+		can_id = (frame->can_id & CAN_EFF_MASK)
+			 << (MSCAN_EFF_RTR_SHIFT + 1);
+		if (rtr)
+			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
+		out_be16(&regs->tx.idr3_2, can_id);
+
+		can_id >>= 16;
+		/* EFF_FLAGS are in between the IDs :( */
+		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
+			 | MSCAN_EFF_FLAGS;
+	} else {
+		can_id = (frame->can_id & CAN_SFF_MASK)
+			 << (MSCAN_SFF_RTR_SHIFT + 1);
+		if (rtr)
+			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
+	}
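+	/*
+	 * Illustrative example only: for a standard frame with id 0x123
+	 * and MSCAN_SFF_RTR_SHIFT == 4, can_id here becomes 0x123 << 5 =
+	 * 0x2460, or 0x2470 with the RTR flag set.
+	 */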
+	out_be16(&regs->tx.idr1_0, can_id);
+
+	if (!rtr) {
+		void __iomem *data = &regs->tx.dsr1_0;
+		u16 *payload = (u16 *)frame->data;
+
+		/* It is safe to write into dsr[dlc+1] */
+		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+			out_be16(data, *payload++);
+			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
+		}
+	}
+
+	out_8(&regs->tx.dlr, frame->can_dlc);
+	out_8(&regs->tx.tbpr, priv->cur_pri);
+
+	/* Start transmission. */
+	out_8(&regs->cantflg, 1 << buf_id);
+
+	if (!test_bit(F_TX_PROGRESS, &priv->flags))
+		dev->trans_start = jiffies;
+
+	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
+
+	can_put_echo_skb(skb, dev, buf_id);
+
+	/* Enable interrupt. */
+	priv->tx_active |= 1 << buf_id;
+	out_8(&regs->cantier, priv->tx_active);
+
+	return NETDEV_TX_OK;
+}
+
+/* This function returns the old state to see where we came from */
+static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	enum can_state state, old_state = priv->can.state;
+
+	if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
+		state = state_map[max(MSCAN_STATE_RX(canrflg),
+				      MSCAN_STATE_TX(canrflg))];
+		priv->can.state = state;
+	}
+	return old_state;
+}
+
+static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	u32 can_id;
+	int i;
+
+	can_id = in_be16(&regs->rx.idr1_0);
+	if (can_id & (1 << 3)) {
+		frame->can_id = CAN_EFF_FLAG;
+		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
+		can_id = ((can_id & 0xffe00000) |
+			  ((can_id & 0x7ffff) << 2)) >> 2;
+	} else {
+		can_id >>= 4;
+		frame->can_id = 0;
+	}
+
+	frame->can_id |= can_id >> 1;
+	if (can_id & 1)
+		frame->can_id |= CAN_RTR_FLAG;
+	frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;
+
+	if (!(frame->can_id & CAN_RTR_FLAG)) {
+		void __iomem *data = &regs->rx.dsr1_0;
+		u16 *payload = (u16 *)frame->data;
+
+		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+			*payload++ = in_be16(data);
+			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
+		}
+	}
+
+	out_8(&regs->canrflg, MSCAN_RXF);
+}
+
+static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
+				u8 canrflg)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	struct net_device_stats *stats = &dev->stats;
+	enum can_state old_state;
+
+	dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
+	frame->can_id = CAN_ERR_FLAG;
+
+	if (canrflg & MSCAN_OVRIF) {
+		frame->can_id |= CAN_ERR_CRTL;
+		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+		stats->rx_over_errors++;
+		stats->rx_errors++;
+	} else {
+		frame->data[1] = 0;
+	}
+
+	old_state = check_set_state(dev, canrflg);
+	/* State changed */
+	if (old_state != priv->can.state) {
+		switch (priv->can.state) {
+		case CAN_STATE_ERROR_WARNING:
+			frame->can_id |= CAN_ERR_CRTL;
+			priv->can.can_stats.error_warning++;
+			if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
+			    (canrflg & MSCAN_RSTAT_MSK))
+				frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+			if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
+			    (canrflg & MSCAN_TSTAT_MSK))
+				frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+			break;
+		case CAN_STATE_ERROR_PASSIVE:
+			frame->can_id |= CAN_ERR_CRTL;
+			priv->can.can_stats.error_passive++;
+			frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+			break;
+		case CAN_STATE_BUS_OFF:
+			frame->can_id |= CAN_ERR_BUSOFF;
+			/*
+			 * The MSCAN on the MPC5200 recovers from bus-off
+			 * automatically. To avoid that, we stop the chip
+			 * with a light-weight stop (we are in irq context).
+			 */
+			out_8(&regs->cantier, 0);
+			out_8(&regs->canrier, 0);
+			setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+			can_bus_off(dev);
+			break;
+		default:
+			break;
+		}
+	}
+	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
+	frame->can_dlc = CAN_ERR_DLC;
+	out_8(&regs->canrflg, MSCAN_ERR_IF);
+}
+
+static int mscan_rx_poll(struct napi_struct *napi, int quota)
+{
+	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
+	struct net_device *dev = napi->dev;
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	struct net_device_stats *stats = &dev->stats;
+	int npackets = 0;
+	int ret = 1;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+	u8 canrflg;
+
+	while (npackets < quota) {
+		canrflg = in_8(&regs->canrflg);
+		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
+			break;
+
+		skb = alloc_can_skb(dev, &frame);
+		if (!skb) {
+			if (printk_ratelimit())
+				dev_notice(dev->dev.parent, "packet dropped\n");
+			stats->rx_dropped++;
+			out_8(&regs->canrflg, canrflg);
+			continue;
+		}
+
+		if (canrflg & MSCAN_RXF)
+			mscan_get_rx_frame(dev, frame);
+		else if (canrflg & MSCAN_ERR_IF)
+			mscan_get_err_frame(dev, frame, canrflg);
+
+		stats->rx_packets++;
+		stats->rx_bytes += frame->can_dlc;
+		npackets++;
+		netif_receive_skb(skb);
+	}
+
+	if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
+		napi_complete(&priv->napi);
+		clear_bit(F_RX_PROGRESS, &priv->flags);
+		if (priv->can.state < CAN_STATE_BUS_OFF)
+			out_8(&regs->canrier, priv->shadow_canrier);
+		ret = 0;
+	}
+	return ret;
+}
+
+static irqreturn_t mscan_isr(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	struct net_device_stats *stats = &dev->stats;
+	u8 cantier, cantflg, canrflg;
+	irqreturn_t ret = IRQ_NONE;
+
+	cantier = in_8(&regs->cantier) & MSCAN_TXE;
+	cantflg = in_8(&regs->cantflg) & cantier;
+
+	if (cantier && cantflg) {
+		struct list_head *tmp, *pos;
+
+		list_for_each_safe(pos, tmp, &priv->tx_head) {
+			struct tx_queue_entry *entry =
+			    list_entry(pos, struct tx_queue_entry, list);
+			u8 mask = entry->mask;
+
+			if (!(cantflg & mask))
+				continue;
+
+			out_8(&regs->cantbsel, mask);
+			stats->tx_bytes += in_8(&regs->tx.dlr);
+			stats->tx_packets++;
+			can_get_echo_skb(dev, entry->id);
+			priv->tx_active &= ~mask;
+			list_del(pos);
+		}
+
+		if (list_empty(&priv->tx_head)) {
+			clear_bit(F_TX_WAIT_ALL, &priv->flags);
+			clear_bit(F_TX_PROGRESS, &priv->flags);
+			priv->cur_pri = 0;
+		} else {
+			dev->trans_start = jiffies;
+		}
+
+		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
+			netif_wake_queue(dev);
+
+		out_8(&regs->cantier, priv->tx_active);
+		ret = IRQ_HANDLED;
+	}
+
+	canrflg = in_8(&regs->canrflg);
+	if ((canrflg & ~MSCAN_STAT_MSK) &&
+	    !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
+		if (canrflg & ~MSCAN_STAT_MSK) {
+			priv->shadow_canrier = in_8(&regs->canrier);
+			out_8(&regs->canrier, 0);
+			napi_schedule(&priv->napi);
+			ret = IRQ_HANDLED;
+		} else {
+			clear_bit(F_RX_PROGRESS, &priv->flags);
+		}
+	}
+	return ret;
+}
+
+static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (!priv->open_time)
+		return -EINVAL;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		if (priv->can.state <= CAN_STATE_BUS_OFF)
+			mscan_set_mode(dev, MSCAN_INIT_MODE);
+		ret = mscan_start(dev);
+		if (ret)
+			break;
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static int mscan_do_set_bittiming(struct net_device *dev)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	struct can_bittiming *bt = &priv->can.bittiming;
+	u8 btr0, btr1;
+
+	btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
+	btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
+		BTR1_SET_TSEG2(bt->phase_seg2) |
+		BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
+
+	dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
+		btr0, btr1);
+
+	out_8(&regs->canbtr0, btr0);
+	out_8(&regs->canbtr1, btr1);
+
+	return 0;
+}
+
+static int mscan_open(struct net_device *dev)
+{
+	int ret;
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+	/* common open */
+	ret = open_candev(dev);
+	if (ret)
+		return ret;
+
+	napi_enable(&priv->napi);
+
+	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
+	if (ret < 0) {
+		dev_err(dev->dev.parent, "failed to attach interrupt\n");
+		goto exit_napi_disable;
+	}
+
+	priv->open_time = jiffies;
+
+	clrbits8(&regs->canctl1, MSCAN_LISTEN);
+
+	ret = mscan_start(dev);
+	if (ret)
+		goto exit_free_irq;
+
+	netif_start_queue(dev);
+
+	return 0;
+
+exit_free_irq:
+	priv->open_time = 0;
+	free_irq(dev->irq, dev);
+exit_napi_disable:
+	napi_disable(&priv->napi);
+	close_candev(dev);
+	return ret;
+}
+
+static int mscan_close(struct net_device *dev)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+
+	out_8(&regs->cantier, 0);
+	out_8(&regs->canrier, 0);
+	mscan_set_mode(dev, MSCAN_INIT_MODE);
+	close_candev(dev);
+	free_irq(dev->irq, dev);
+	priv->open_time = 0;
+
+	return 0;
+}
+
+static const struct net_device_ops mscan_netdev_ops = {
+	.ndo_open		= mscan_open,
+	.ndo_stop		= mscan_close,
+	.ndo_start_xmit		= mscan_start_xmit,
+};
+
+int register_mscandev(struct net_device *dev, int clock_src)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	u8 ctl1;
+
+	ctl1 = in_8(&regs->canctl1);
+	if (clock_src)
+		ctl1 |= MSCAN_CLKSRC;
+	else
+		ctl1 &= ~MSCAN_CLKSRC;
+
+	ctl1 |= MSCAN_CANE;
+	out_8(&regs->canctl1, ctl1);
+	udelay(100);
+
+	/* acceptance mask/acceptance code (accept everything) */
+	out_be16(&regs->canidar1_0, 0);
+	out_be16(&regs->canidar3_2, 0);
+	out_be16(&regs->canidar5_4, 0);
+	out_be16(&regs->canidar7_6, 0);
+
+	out_be16(&regs->canidmr1_0, 0xffff);
+	out_be16(&regs->canidmr3_2, 0xffff);
+	out_be16(&regs->canidmr5_4, 0xffff);
+	out_be16(&regs->canidmr7_6, 0xffff);
+	/* Two 32 bit Acceptance Filters */
+	out_8(&regs->canidac, MSCAN_AF_32BIT);
+
+	mscan_set_mode(dev, MSCAN_INIT_MODE);
+
+	return register_candev(dev);
+}
+
+void unregister_mscandev(struct net_device *dev)
+{
+	struct mscan_priv *priv = netdev_priv(dev);
+	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+	mscan_set_mode(dev, MSCAN_INIT_MODE);
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+	unregister_candev(dev);
+}
+
+struct net_device *alloc_mscandev(void)
+{
+	struct net_device *dev;
+	struct mscan_priv *priv;
+	int i;
+
+	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
+	if (!dev)
+		return NULL;
+	priv = netdev_priv(dev);
+
+	dev->netdev_ops = &mscan_netdev_ops;
+
+	dev->flags |= IFF_ECHO;	/* we support local echo */
+
+	netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
+
+	priv->can.bittiming_const = &mscan_bittiming_const;
+	priv->can.do_set_bittiming = mscan_do_set_bittiming;
+	priv->can.do_set_mode = mscan_do_set_mode;
+
+	for (i = 0; i < TX_QUEUE_SIZE; i++) {
+		priv->tx_queue[i].id = i;
+		priv->tx_queue[i].mask = 1 << i;
+	}
+
+	return dev;
+}
+
+MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN port driver for MSCAN based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 000000000000..00fc4aaf1ed8
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,296 @@
+/*
+ * Definitions of consts/structs to drive the Freescale MSCAN.
+ *
+ * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
+ *                         Varma Electronics Oy
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __MSCAN_H__
+#define __MSCAN_H__
+
+#include <linux/types.h>
+
+/* MSCAN control register 0 (CANCTL0) bits */
+#define MSCAN_RXFRM		0x80
+#define MSCAN_RXACT		0x40
+#define MSCAN_CSWAI		0x20
+#define MSCAN_SYNCH		0x10
+#define MSCAN_TIME		0x08
+#define MSCAN_WUPE		0x04
+#define MSCAN_SLPRQ		0x02
+#define MSCAN_INITRQ		0x01
+
+/* MSCAN control register 1 (CANCTL1) bits */
+#define MSCAN_CANE		0x80
+#define MSCAN_CLKSRC		0x40
+#define MSCAN_LOOPB		0x20
+#define MSCAN_LISTEN		0x10
+#define MSCAN_WUPM		0x04
+#define MSCAN_SLPAK		0x02
+#define MSCAN_INITAK		0x01
+
+/* Use the MPC5200 MSCAN variant? */
+#ifdef CONFIG_PPC
+#define MSCAN_FOR_MPC5200
+#endif
+
+#ifdef MSCAN_FOR_MPC5200
+#define MSCAN_CLKSRC_BUS	0
+#define MSCAN_CLKSRC_XTAL	MSCAN_CLKSRC
+#else
+#define MSCAN_CLKSRC_BUS	MSCAN_CLKSRC
+#define MSCAN_CLKSRC_XTAL	0
+#endif
+
+/* MSCAN receiver flag register (CANRFLG) bits */
+#define MSCAN_WUPIF		0x80
+#define MSCAN_CSCIF		0x40
+#define MSCAN_RSTAT1		0x20
+#define MSCAN_RSTAT0		0x10
+#define MSCAN_TSTAT1		0x08
+#define MSCAN_TSTAT0		0x04
+#define MSCAN_OVRIF		0x02
+#define MSCAN_RXF		0x01
+#define MSCAN_ERR_IF 		(MSCAN_OVRIF | MSCAN_CSCIF)
+#define MSCAN_RSTAT_MSK		(MSCAN_RSTAT1 | MSCAN_RSTAT0)
+#define MSCAN_TSTAT_MSK		(MSCAN_TSTAT1 | MSCAN_TSTAT0)
+#define MSCAN_STAT_MSK		(MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
+
+#define MSCAN_STATE_BUS_OFF	(MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
+				 MSCAN_TSTAT1 | MSCAN_TSTAT0)
+#define MSCAN_STATE_TX(canrflg)	(((canrflg)&MSCAN_TSTAT_MSK)>>2)
+#define MSCAN_STATE_RX(canrflg)	(((canrflg)&MSCAN_RSTAT_MSK)>>4)
+#define MSCAN_STATE_ACTIVE	0
+#define MSCAN_STATE_WARNING	1
+#define MSCAN_STATE_PASSIVE	2
+#define MSCAN_STATE_BUSOFF	3
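+
+/*
+ * Example, for illustration: a CANRFLG readback with RSTAT = 10b and
+ * TSTAT = 00b yields MSCAN_STATE_RX() == 2 (MSCAN_STATE_PASSIVE) and
+ * MSCAN_STATE_TX() == 0 (MSCAN_STATE_ACTIVE); the driver then maps
+ * max(2, 0) through its state table to CAN_STATE_ERROR_PASSIVE.
+ */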
+
+/* MSCAN receiver interrupt enable register (CANRIER) bits */
+#define MSCAN_WUPIE		0x80
+#define MSCAN_CSCIE		0x40
+#define MSCAN_RSTATE1		0x20
+#define MSCAN_RSTATE0		0x10
+#define MSCAN_TSTATE1		0x08
+#define MSCAN_TSTATE0		0x04
+#define MSCAN_OVRIE		0x02
+#define MSCAN_RXFIE		0x01
+
+/* MSCAN transmitter flag register (CANTFLG) bits */
+#define MSCAN_TXE2		0x04
+#define MSCAN_TXE1		0x02
+#define MSCAN_TXE0		0x01
+#define MSCAN_TXE		(MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
+
+/* MSCAN transmitter interrupt enable register (CANTIER) bits */
+#define MSCAN_TXIE2		0x04
+#define MSCAN_TXIE1		0x02
+#define MSCAN_TXIE0		0x01
+#define MSCAN_TXIE		(MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
+
+/* MSCAN transmitter message abort request (CANTARQ) bits */
+#define MSCAN_ABTRQ2		0x04
+#define MSCAN_ABTRQ1		0x02
+#define MSCAN_ABTRQ0		0x01
+
+/* MSCAN transmitter message abort ack (CANTAAK) bits */
+#define MSCAN_ABTAK2		0x04
+#define MSCAN_ABTAK1		0x02
+#define MSCAN_ABTAK0		0x01
+
+/* MSCAN transmit buffer selection (CANTBSEL) bits */
+#define MSCAN_TX2		0x04
+#define MSCAN_TX1		0x02
+#define MSCAN_TX0		0x01
+
+/* MSCAN ID acceptance control register (CANIDAC) bits */
+#define MSCAN_IDAM1		0x20
+#define MSCAN_IDAM0		0x10
+#define MSCAN_IDHIT2		0x04
+#define MSCAN_IDHIT1		0x02
+#define MSCAN_IDHIT0		0x01
+
+#define MSCAN_AF_32BIT		0x00
+#define MSCAN_AF_16BIT		MSCAN_IDAM0
+#define MSCAN_AF_8BIT		MSCAN_IDAM1
+#define MSCAN_AF_CLOSED		(MSCAN_IDAM0|MSCAN_IDAM1)
+#define MSCAN_AF_MASK		(~(MSCAN_IDAM0|MSCAN_IDAM1))
+
+/* MSCAN Miscellaneous Register (CANMISC) bits */
+#define MSCAN_BOHOLD		0x01
+
+/* MSCAN Identifier Register (IDR) bits */
+#define MSCAN_SFF_RTR_SHIFT	4
+#define MSCAN_EFF_RTR_SHIFT	0
+#define MSCAN_EFF_FLAGS		0x18	/* IDE + SRR */
+
+#ifdef MSCAN_FOR_MPC5200
+#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
+#define _MSCAN_RESERVED_DSR_SIZE	2
+#else
+#define _MSCAN_RESERVED_(n, num)
+#define _MSCAN_RESERVED_DSR_SIZE	0
+#endif
+
+/* Structure of the hardware registers */
+struct mscan_regs {
+	/* (see doc S12MSCANV3/D)		  MPC5200    MSCAN */
+	u8 canctl0;				/* + 0x00     0x00 */
+	u8 canctl1;				/* + 0x01     0x01 */
+	_MSCAN_RESERVED_(1, 2);			/* + 0x02          */
+	u8 canbtr0;				/* + 0x04     0x02 */
+	u8 canbtr1;				/* + 0x05     0x03 */
+	_MSCAN_RESERVED_(2, 2);			/* + 0x06          */
+	u8 canrflg;				/* + 0x08     0x04 */
+	u8 canrier;				/* + 0x09     0x05 */
+	_MSCAN_RESERVED_(3, 2);			/* + 0x0a          */
+	u8 cantflg;				/* + 0x0c     0x06 */
+	u8 cantier;				/* + 0x0d     0x07 */
+	_MSCAN_RESERVED_(4, 2);			/* + 0x0e          */
+	u8 cantarq;				/* + 0x10     0x08 */
+	u8 cantaak;				/* + 0x11     0x09 */
+	_MSCAN_RESERVED_(5, 2);			/* + 0x12          */
+	u8 cantbsel;				/* + 0x14     0x0a */
+	u8 canidac;				/* + 0x15     0x0b */
+	u8 reserved;				/* + 0x16     0x0c */
+	_MSCAN_RESERVED_(6, 5);			/* + 0x17          */
+#ifndef MSCAN_FOR_MPC5200
+	u8 canmisc;				/*            0x0d */
+#endif
+	u8 canrxerr;				/* + 0x1c     0x0e */
+	u8 cantxerr;				/* + 0x1d     0x0f */
+	_MSCAN_RESERVED_(7, 2);			/* + 0x1e          */
+	u16 canidar1_0;				/* + 0x20     0x10 */
+	_MSCAN_RESERVED_(8, 2);			/* + 0x22          */
+	u16 canidar3_2;				/* + 0x24     0x12 */
+	_MSCAN_RESERVED_(9, 2);			/* + 0x26          */
+	u16 canidmr1_0;				/* + 0x28     0x14 */
+	_MSCAN_RESERVED_(10, 2);		/* + 0x2a          */
+	u16 canidmr3_2;				/* + 0x2c     0x16 */
+	_MSCAN_RESERVED_(11, 2);		/* + 0x2e          */
+	u16 canidar5_4;				/* + 0x30     0x18 */
+	_MSCAN_RESERVED_(12, 2);		/* + 0x32          */
+	u16 canidar7_6;				/* + 0x34     0x1a */
+	_MSCAN_RESERVED_(13, 2);		/* + 0x36          */
+	u16 canidmr5_4;				/* + 0x38     0x1c */
+	_MSCAN_RESERVED_(14, 2);		/* + 0x3a          */
+	u16 canidmr7_6;				/* + 0x3c     0x1e */
+	_MSCAN_RESERVED_(15, 2);		/* + 0x3e          */
+	struct {
+		u16 idr1_0;			/* + 0x40     0x20 */
+		 _MSCAN_RESERVED_(16, 2);	/* + 0x42          */
+		u16 idr3_2;			/* + 0x44     0x22 */
+		 _MSCAN_RESERVED_(17, 2);	/* + 0x46          */
+		u16 dsr1_0;			/* + 0x48     0x24 */
+		 _MSCAN_RESERVED_(18, 2);	/* + 0x4a          */
+		u16 dsr3_2;			/* + 0x4c     0x26 */
+		 _MSCAN_RESERVED_(19, 2);	/* + 0x4e          */
+		u16 dsr5_4;			/* + 0x50     0x28 */
+		 _MSCAN_RESERVED_(20, 2);	/* + 0x52          */
+		u16 dsr7_6;			/* + 0x54     0x2a */
+		 _MSCAN_RESERVED_(21, 2);	/* + 0x56          */
+		u8 dlr;				/* + 0x58     0x2c */
+		 u8:8;				/* + 0x59     0x2d */
+		 _MSCAN_RESERVED_(22, 2);	/* + 0x5a          */
+		u16 time;			/* + 0x5c     0x2e */
+	} rx;
+	 _MSCAN_RESERVED_(23, 2);		/* + 0x5e          */
+	struct {
+		u16 idr1_0;			/* + 0x60     0x30 */
+		 _MSCAN_RESERVED_(24, 2);	/* + 0x62          */
+		u16 idr3_2;			/* + 0x64     0x32 */
+		 _MSCAN_RESERVED_(25, 2);	/* + 0x66          */
+		u16 dsr1_0;			/* + 0x68     0x34 */
+		 _MSCAN_RESERVED_(26, 2);	/* + 0x6a          */
+		u16 dsr3_2;			/* + 0x6c     0x36 */
+		 _MSCAN_RESERVED_(27, 2);	/* + 0x6e          */
+		u16 dsr5_4;			/* + 0x70     0x38 */
+		 _MSCAN_RESERVED_(28, 2);	/* + 0x72          */
+		u16 dsr7_6;			/* + 0x74     0x3a */
+		 _MSCAN_RESERVED_(29, 2);	/* + 0x76          */
+		u8 dlr;				/* + 0x78     0x3c */
+		u8 tbpr;			/* + 0x79     0x3d */
+		 _MSCAN_RESERVED_(30, 2);	/* + 0x7a          */
+		u16 time;			/* + 0x7c     0x3e */
+	} tx;
+	 _MSCAN_RESERVED_(31, 2);		/* + 0x7e          */
+} __attribute__ ((packed));
+
+#undef _MSCAN_RESERVED_
+#define MSCAN_REGION	sizeof(struct mscan_regs)
+
+#define MSCAN_NORMAL_MODE	0
+#define MSCAN_SLEEP_MODE	MSCAN_SLPRQ
+#define MSCAN_INIT_MODE		(MSCAN_INITRQ | MSCAN_SLPRQ)
+#define MSCAN_POWEROFF_MODE	(MSCAN_CSWAI | MSCAN_SLPRQ)
+#define MSCAN_SET_MODE_RETRIES	255
+#define MSCAN_ECHO_SKB_MAX	3
+
+#define BTR0_BRP_MASK		0x3f
+#define BTR0_SJW_SHIFT		6
+#define BTR0_SJW_MASK		(0x3 << BTR0_SJW_SHIFT)
+
+#define BTR1_TSEG1_MASK 	0xf
+#define BTR1_TSEG2_SHIFT	4
+#define BTR1_TSEG2_MASK 	(0x7 << BTR1_TSEG2_SHIFT)
+#define BTR1_SAM_SHIFT  	7
+
+#define BTR0_SET_BRP(brp)	(((brp) - 1) & BTR0_BRP_MASK)
+#define BTR0_SET_SJW(sjw)	((((sjw) - 1) << BTR0_SJW_SHIFT) & \
+				 BTR0_SJW_MASK)
+
+#define BTR1_SET_TSEG1(tseg1)	(((tseg1) - 1) &  BTR1_TSEG1_MASK)
+#define BTR1_SET_TSEG2(tseg2)	((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
+				 BTR1_TSEG2_MASK)
+#define BTR1_SET_SAM(sam)	((sam) ? 1 << BTR1_SAM_SHIFT : 0)
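
As a hedged worked example (illustrative timing values, not from the patch), the helpers
above pack brp=4, sjw=1, tseg1=13, tseg2=2 with single sampling into the two bit-timing
registers like this; the wrapper function is hypothetical:

	static void mscan_btr_example(void)
	{
		u8 btr0 = BTR0_SET_BRP(4) | BTR0_SET_SJW(1);	/* 0x03 | 0x00 = 0x03 */
		u8 btr1 = BTR1_SET_TSEG1(13) | BTR1_SET_TSEG2(2) |
			  BTR1_SET_SAM(0);			/* 0x0c | 0x10 | 0x00 = 0x1c */

		(void)btr0;
		(void)btr1;
	}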
+
+#define F_RX_PROGRESS	0
+#define F_TX_PROGRESS	1
+#define F_TX_WAIT_ALL	2
+
+#define TX_QUEUE_SIZE	3
+
+struct tx_queue_entry {
+	struct list_head list;
+	u8 mask;
+	u8 id;
+};
+
+struct mscan_priv {
+	struct can_priv can;	/* must be the first member */
+	long open_time;
+	unsigned long flags;
+	void __iomem *reg_base;	/* ioremap'ed address to registers */
+	u8 shadow_statflg;
+	u8 shadow_canrier;
+	u8 cur_pri;
+	u8 prev_buf_id;
+	u8 tx_active;
+
+	struct list_head tx_head;
+	struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
+	struct napi_struct napi;
+};
+
+extern struct net_device *alloc_mscandev(void);
+/*
+ * clock_src:
+ *	1 = The MSCAN clock source is the on-chip Bus Clock.
+ *	0 = The MSCAN clock source is the chip's Oscillator Clock.
+ */
+extern int register_mscandev(struct net_device *dev, int clock_src);
+extern void unregister_mscandev(struct net_device *dev);
+
+#endif /* __MSCAN_H__ */
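
For orientation, a minimal sketch of the board glue that consumes this header; the probe
function and its error handling are hypothetical, and it assumes the net_device returned by
alloc_mscandev() is backed by the CAN device allocator:

	static int example_mscan_board_probe(void)
	{
		struct net_device *dev;
		int err;

		dev = alloc_mscandev();
		if (!dev)
			return -ENOMEM;

		/* real glue code would ioremap the registers and set priv->reg_base */

		/* clock_src: 1 = on-chip bus clock, 0 = oscillator clock (see above) */
		err = register_mscandev(dev, 1);
		if (err)
			free_candev(dev);	/* assumes a candev-backed allocation */

		return err;
	}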
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 16d2ecd2a3b7..b4ba88a31075 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -296,11 +296,9 @@ static void sja1000_rx(struct net_device *dev)
 	uint8_t dlc;
 	int i;
 
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_skb(dev, &cf);
 	if (skb == NULL)
 		return;
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
 
 	fi = priv->read_reg(priv, REG_FI);
 	dlc = fi & 0x0F;
@@ -323,8 +321,6 @@ static void sja1000_rx(struct net_device *dev)
 	if (fi & FI_RTR)
 		id |= CAN_RTR_FLAG;
 
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
 	cf->can_id = id;
 	cf->can_dlc = dlc;
 	for (i = 0; i < dlc; i++)
@@ -351,15 +347,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
 	enum can_state state = priv->can.state;
 	uint8_t ecc, alc;
 
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev, &cf);
 	if (skb == NULL)
 		return -ENOMEM;
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-	cf->can_id = CAN_ERR_FLAG;
-	cf->can_dlc = CAN_ERR_DLC;
 
 	if (isrc & IRQ_DOI) {
 		/* data overrun interrupt */
@@ -526,7 +516,7 @@ static int sja1000_open(struct net_device *dev)
 
 	/* register interrupt handler, if not done by the device driver */
 	if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
-		err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags,
+		err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
 				  dev->name, (void *)dev);
 		if (err) {
 			close_candev(dev);
@@ -565,7 +555,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
 	struct net_device *dev;
 	struct sja1000_priv *priv;
 
-	dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv);
+	dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv,
+		SJA1000_ECHO_SKB_MAX);
 	if (!dev)
 		return NULL;
 
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 302d2c763ad7..97a622b9302f 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -50,6 +50,8 @@
 #include <linux/can/dev.h>
 #include <linux/can/platform/sja1000.h>
 
+#define SJA1000_ECHO_SKB_MAX	1 /* the SJA1000 has one TX buffer object */
+
 #define SJA1000_MAX_IRQ 20	/* max. number of interrupts handled in ISR */
 
 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
new file mode 100644
index 000000000000..07e8016b17ec
--- /dev/null
+++ b/drivers/net/can/ti_hecc.c
@@ -0,0 +1,993 @@
+/*
+ * TI HECC (CAN) device driver
+ *
+ * This driver supports TI's HECC (High End CAN Controller) module; the
+ * specifications are available at <http://www.ti.com>
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed as is WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Your platform definitions should specify the module RAM offsets and the
+ * interrupt number to use, as follows:
+ *
+ * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
+ *         .scc_hecc_offset        = 0,
+ *         .scc_ram_offset         = 0x3000,
+ *         .hecc_ram_offset        = 0x3000,
+ *         .mbx_offset             = 0x2000,
+ *         .int_line               = 0,
+ *         .revision               = 1,
+ * };
+ *
+ * Please see include/linux/can/platform/ti_hecc.h for a description of the
+ * above fields.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/ti_hecc.h>
+
+#define DRV_NAME "ti_hecc"
+#define HECC_MODULE_VERSION     "0.7"
+MODULE_VERSION(HECC_MODULE_VERSION);
+#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION
+
+/* TX / RX Mailbox Configuration */
+#define HECC_MAX_MAILBOXES	32	/* hardware mailboxes - do not change */
+#define MAX_TX_PRIO		0x3F	/* hardware value - do not change */
+
+/*
+ * Important Note: TX mailbox configuration
+ * TX mailboxes should be restricted to the number of SKB buffers to avoid
+ * maintaining SKB buffers separately. TX mailboxes should be a power of 2
+ * for the mailbox logic to work.  Top mailbox numbers are reserved for RX
+ * and lower mailboxes for TX.
+ *
+ * HECC_MAX_TX_MBOX	HECC_MB_TX_SHIFT
+ * 4 (default)		2
+ * 8			3
+ * 16			4
+ */
+#define HECC_MB_TX_SHIFT	2 /* as per table above */
+#define HECC_MAX_TX_MBOX	BIT(HECC_MB_TX_SHIFT)
+
+#define HECC_TX_PRIO_SHIFT	(HECC_MB_TX_SHIFT)
+#define HECC_TX_PRIO_MASK	(MAX_TX_PRIO << HECC_MB_TX_SHIFT)
+#define HECC_TX_MB_MASK		(HECC_MAX_TX_MBOX - 1)
+#define HECC_TX_MASK		((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
+#define HECC_TX_MBOX_MASK	(~(BIT(HECC_MAX_TX_MBOX) - 1))
+#define HECC_DEF_NAPI_WEIGHT	HECC_MAX_RX_MBOX
+
+/*
+ * Important Note: RX mailbox configuration
+ * RX mailboxes are further logically split into two - main and buffer
+ * mailboxes. The goal is to get all packets into main mailboxes as
+ * driven by mailbox number and receive priority (higher to lower) and
+ * buffer mailboxes are used to receive pkts while main mailboxes are being
+ * processed. This ensures in-order packet reception.
+ *
+ * Here are the recommended values for buffer mailbox. Note that RX mailboxes
+ * start after TX mailboxes:
+ *
+ * HECC_MAX_RX_MBOX		HECC_RX_BUFFER_MBOX	No of buffer mailboxes
+ * 28				12			8
+ * 16				20			4
+ */
+
+#define HECC_MAX_RX_MBOX	(HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
+#define HECC_RX_BUFFER_MBOX	12 /* as per table above */
+#define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)
+#define HECC_RX_HIGH_MBOX_MASK	(~(BIT(HECC_RX_BUFFER_MBOX) - 1))
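
A hedged sanity check (hypothetical helper, not in the patch) of what the mailbox split works
out to for the default HECC_MB_TX_SHIFT of 2, matching the tables above:

	static inline void hecc_mbox_layout_example(void)
	{
		BUILD_BUG_ON(HECC_MAX_TX_MBOX != 4);	/* TX mailboxes 0..3 */
		BUILD_BUG_ON(HECC_MAX_RX_MBOX != 28);	/* RX mailboxes 4..31 */
		BUILD_BUG_ON(HECC_RX_FIRST_MBOX != 31);	/* reception starts at 31 */
		/* main bank: mailboxes 12..31, buffer bank: mailboxes 4..11 */
	}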
+
+/* TI HECC module registers */
+#define HECC_CANME		0x0	/* Mailbox enable */
+#define HECC_CANMD		0x4	/* Mailbox direction */
+#define HECC_CANTRS		0x8	/* Transmit request set */
+#define HECC_CANTRR		0xC	/* Transmit request */
+#define HECC_CANTA		0x10	/* Transmission acknowledge */
+#define HECC_CANAA		0x14	/* Abort acknowledge */
+#define HECC_CANRMP		0x18	/* Receive message pending */
+#define HECC_CANRML		0x1C	/* Remote message lost */
+#define HECC_CANRFP		0x20	/* Remote frame pending */
+#define HECC_CANGAM		0x24	/* SCC only: Global acceptance mask */
+#define HECC_CANMC		0x28	/* Master control */
+#define HECC_CANBTC		0x2C	/* Bit timing configuration */
+#define HECC_CANES		0x30	/* Error and status */
+#define HECC_CANTEC		0x34	/* Transmit error counter */
+#define HECC_CANREC		0x38	/* Receive error counter */
+#define HECC_CANGIF0		0x3C	/* Global interrupt flag 0 */
+#define HECC_CANGIM		0x40	/* Global interrupt mask */
+#define HECC_CANGIF1		0x44	/* Global interrupt flag 1 */
+#define HECC_CANMIM		0x48	/* Mailbox interrupt mask */
+#define HECC_CANMIL		0x4C	/* Mailbox interrupt level */
+#define HECC_CANOPC		0x50	/* Overwrite protection control */
+#define HECC_CANTIOC		0x54	/* Transmit I/O control */
+#define HECC_CANRIOC		0x58	/* Receive I/O control */
+#define HECC_CANLNT		0x5C	/* HECC only: Local network time */
+#define HECC_CANTOC		0x60	/* HECC only: Time-out control */
+#define HECC_CANTOS		0x64	/* HECC only: Time-out status */
+#define HECC_CANTIOCE		0x68	/* SCC only: Enhanced TX I/O control */
+#define HECC_CANRIOCE		0x6C	/* SCC only: Enhanced RX I/O control */
+
+/* Mailbox registers */
+#define HECC_CANMID		0x0
+#define HECC_CANMCF		0x4
+#define HECC_CANMDL		0x8
+#define HECC_CANMDH		0xC
+
+#define HECC_SET_REG		0xFFFFFFFF
+#define HECC_CANID_MASK		0x3FF	/* 18 bits mask for extended id's */
+#define HECC_CCE_WAIT_COUNT     100	/* Wait ~1 ms (100 x 10 us) for CCE bit */
+
+#define HECC_CANMC_SCM		BIT(13)	/* SCC compat mode */
+#define HECC_CANMC_CCR		BIT(12)	/* Change config request */
+#define HECC_CANMC_PDR		BIT(11)	/* Local Power down - for sleep mode */
+#define HECC_CANMC_ABO		BIT(7)	/* Auto Bus On */
+#define HECC_CANMC_STM		BIT(6)	/* Self test mode - loopback */
+#define HECC_CANMC_SRES		BIT(5)	/* Software reset */
+
+#define HECC_CANTIOC_EN		BIT(3)	/* Enable CAN TX I/O pin */
+#define HECC_CANRIOC_EN		BIT(3)	/* Enable CAN RX I/O pin */
+
+#define HECC_CANMID_IDE		BIT(31)	/* Extended frame format */
+#define HECC_CANMID_AME		BIT(30)	/* Acceptance mask enable */
+#define HECC_CANMID_AAM		BIT(29)	/* Auto answer mode */
+
+#define HECC_CANES_FE		BIT(24)	/* form error */
+#define HECC_CANES_BE		BIT(23)	/* bit error */
+#define HECC_CANES_SA1		BIT(22)	/* stuck at dominant error */
+#define HECC_CANES_CRCE		BIT(21)	/* CRC error */
+#define HECC_CANES_SE		BIT(20)	/* stuff bit error */
+#define HECC_CANES_ACKE		BIT(19)	/* ack error */
+#define HECC_CANES_BO		BIT(18)	/* Bus off status */
+#define HECC_CANES_EP		BIT(17)	/* Error passive status */
+#define HECC_CANES_EW		BIT(16)	/* Error warning status */
+#define HECC_CANES_SMA		BIT(5)	/* suspend mode ack */
+#define HECC_CANES_CCE		BIT(4)	/* Change config enabled */
+#define HECC_CANES_PDA		BIT(3)	/* Power down mode ack */
+
+#define HECC_CANBTC_SAM		BIT(7)	/* sample points */
+
+#define HECC_BUS_ERROR		(HECC_CANES_FE | HECC_CANES_BE |\
+				HECC_CANES_CRCE | HECC_CANES_SE |\
+				HECC_CANES_ACKE)
+
+#define HECC_CANMCF_RTR		BIT(4)	/* Remote transmit request */
+
+#define HECC_CANGIF_MAIF	BIT(17)	/* Message alarm interrupt */
+#define HECC_CANGIF_TCOIF	BIT(16) /* Timer counter overflow int */
+#define HECC_CANGIF_GMIF	BIT(15)	/* Global mailbox interrupt */
+#define HECC_CANGIF_AAIF	BIT(14)	/* Abort ack interrupt */
+#define HECC_CANGIF_WDIF	BIT(13)	/* Write denied interrupt */
+#define HECC_CANGIF_WUIF	BIT(12)	/* Wake up interrupt */
+#define HECC_CANGIF_RMLIF	BIT(11)	/* Receive message lost interrupt */
+#define HECC_CANGIF_BOIF	BIT(10)	/* Bus off interrupt */
+#define HECC_CANGIF_EPIF	BIT(9)	/* Error passive interrupt */
+#define HECC_CANGIF_WLIF	BIT(8)	/* Warning level interrupt */
+#define HECC_CANGIF_MBOX_MASK	0x1F	/* Mailbox number mask */
+#define HECC_CANGIM_I1EN	BIT(1)	/* Int line 1 enable */
+#define HECC_CANGIM_I0EN	BIT(0)	/* Int line 0 enable */
+#define HECC_CANGIM_DEF_MASK	0x700	/* only busoff/warning/passive */
+#define HECC_CANGIM_SIL		BIT(2)	/* system interrupts to int line 1 */
+
+/* CAN Bittiming constants as per HECC specs */
+static struct can_bittiming_const ti_hecc_bittiming_const = {
+	.name = DRV_NAME,
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 256,
+	.brp_inc = 1,
+};
+
+struct ti_hecc_priv {
+	struct can_priv can;	/* MUST be first member/field */
+	struct napi_struct napi;
+	struct net_device *ndev;
+	struct clk *clk;
+	void __iomem *base;
+	u32 scc_ram_offset;
+	u32 hecc_ram_offset;
+	u32 mbx_offset;
+	u32 int_line;
+	spinlock_t mbx_lock; /* CANME register needs protection */
+	u32 tx_head;
+	u32 tx_tail;
+	u32 rx_next;
+};
+
+static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
+{
+	return priv->tx_head & HECC_TX_MB_MASK;
+}
+
+static inline int get_tx_tail_mb(struct ti_hecc_priv *priv)
+{
+	return priv->tx_tail & HECC_TX_MB_MASK;
+}
+
+static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
+{
+	return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO;
+}
+
+static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
+{
+	__raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
+}
+
+static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
+	u32 reg, u32 val)
+{
+	__raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
+			reg);
+}
+
+static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
+{
+	return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
+			reg);
+}
+
+static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
+{
+	__raw_writel(val, priv->base + reg);
+}
+
+static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
+{
+	return __raw_readl(priv->base + reg);
+}
+
+static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
+	u32 bit_mask)
+{
+	hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
+}
+
+static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
+	u32 bit_mask)
+{
+	hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
+}
+
+static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
+{
+	return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
+}
+
+static int ti_hecc_get_state(const struct net_device *ndev,
+	enum can_state *state)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+	*state = priv->can.state;
+	return 0;
+}
+
+static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
+{
+	struct can_bittiming *bit_timing = &priv->can.bittiming;
+	u32 can_btc;
+
+	can_btc = (bit_timing->phase_seg2 - 1) & 0x7;
+	can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1)
+			& 0xF) << 3;
+	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) {
+		if (bit_timing->brp > 4)
+			can_btc |= HECC_CANBTC_SAM;
+		else
+			dev_warn(priv->ndev->dev.parent,
+				"WARN: Triple sampling not set due to h/w limitations\n");
+	}
+	can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
+	can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
+
+	/* ERM being set to 0 by default meaning resync at falling edge */
+
+	hecc_write(priv, HECC_CANBTC, can_btc);
+	dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc);
+
+	return 0;
+}
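
For concreteness, a hedged walk-through of the packing above with illustrative numbers (not
taken from the patch):

	/*
	 *   phase_seg2 = 2              -> (2 - 1) & 0x7          = 0x00001
	 *   phase_seg1 + prop_seg = 13  -> ((13 - 1) & 0xF) << 3  = 0x00060
	 *   sjw = 1                     -> ((1 - 1) & 0x3) << 8   = 0x00000
	 *   brp = 4                     -> ((4 - 1) & 0xFF) << 16 = 0x30000
	 *                                  CANBTC                 = 0x30061
	 * with SAM left clear, i.e. single sampling.
	 */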
+
+static void ti_hecc_reset(struct net_device *ndev)
+{
+	u32 cnt;
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+	dev_dbg(ndev->dev.parent, "resetting hecc ...\n");
+	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);
+
+	/* Set change control request and wait till enabled */
+	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+
+	/*
+	 * INFO: It has been observed that the CCE bit is sometimes not set
+	 * even though the hardware is otherwise fine, so time out after
+	 * ~1 ms (as per the specs) rather than waiting forever.
+	 */
+	cnt = HECC_CCE_WAIT_COUNT;
+	while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
+		--cnt;
+		udelay(10);
+	}
+
+	/*
+	 * Note: On HECC, BTC can be programmed only in initialization mode,
+	 * so the CAN bit-timing parameters are expected to be set via the ip
+	 * utility before the device is opened.
+	 */
+	ti_hecc_set_btc(priv);
+
+	/* Clear CCR (and the rest of CANMC) and wait for the CCE bit to clear */
+	hecc_write(priv, HECC_CANMC, 0);
+
+	/*
+	 * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
+	 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
+	 */
+
+	/*
+	 * INFO: As above, the CCE bit may occasionally fail to clear even
+	 * though the hardware is fine, so time out after ~1 ms here too.
+	 */
+	cnt = HECC_CCE_WAIT_COUNT;
+	while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
+		--cnt;
+		udelay(10);
+	}
+
+	/* Enable TX and RX I/O Control pins */
+	hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN);
+	hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN);
+
+	/* Clear registers for clean operation */
+	hecc_write(priv, HECC_CANTA, HECC_SET_REG);
+	hecc_write(priv, HECC_CANRMP, HECC_SET_REG);
+	hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
+	hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
+	hecc_write(priv, HECC_CANME, 0);
+	hecc_write(priv, HECC_CANMD, 0);
+
+	/* SCC compat mode NOT supported (and not needed either) */
+	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM);
+}
+
+static void ti_hecc_start(struct net_device *ndev)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	u32 cnt, mbxno, mbx_mask;
+
+	/* put HECC in initialization mode and set btc */
+	ti_hecc_reset(ndev);
+
+	priv->tx_head = priv->tx_tail = HECC_TX_MASK;
+	priv->rx_next = HECC_RX_FIRST_MBOX;
+
+	/* Enable local and global acceptance mask registers */
+	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
+
+	/* Prepare configured mailboxes to receive messages */
+	for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) {
+		mbxno = HECC_MAX_MAILBOXES - 1 - cnt;
+		mbx_mask = BIT(mbxno);
+		hecc_clear_bit(priv, HECC_CANME, mbx_mask);
+		hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME);
+		hecc_write_lam(priv, mbxno, HECC_SET_REG);
+		hecc_set_bit(priv, HECC_CANMD, mbx_mask);
+		hecc_set_bit(priv, HECC_CANME, mbx_mask);
+		hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
+	}
+
+	/* Prevent message over-write & Enable interrupts */
+	hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
+	if (priv->int_line) {
+		hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
+		hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
+			HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
+	} else {
+		hecc_write(priv, HECC_CANMIL, 0);
+		hecc_write(priv, HECC_CANGIM,
+			HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
+	}
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void ti_hecc_stop(struct net_device *ndev)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+	/* Disable interrupts and disable mailboxes */
+	hecc_write(priv, HECC_CANGIM, 0);
+	hecc_write(priv, HECC_CANMIM, 0);
+	hecc_write(priv, HECC_CANME, 0);
+	priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+	int ret = 0;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		ti_hecc_start(ndev);
+		netif_wake_queue(ndev);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * ti_hecc_xmit: HECC Transmit
+ *
+ * The transmit mailboxes are numbered 0 to HECC_MAX_TX_MBOX - 1. In HECC the
+ * transmission priority of a mailbox is determined by the priority field in
+ * its mailbox registers: the mailbox with the highest value in the priority
+ * field is transmitted first, and only when two mailboxes carry the same
+ * priority value is the higher-numbered mailbox transmitted first.
+ *
+ * To exploit this feature we start with the highest-numbered mailbox at the
+ * highest priority level, move on to the next mailbox at the same priority
+ * level, and so on. Once all transmit mailboxes have been used we step down
+ * to the next (lower) priority level and repeat, until we reach the lowest
+ * priority level on the lowest-numbered mailbox. Transmission then stops
+ * until all queued mailboxes have been sent, after which we restart at the
+ * highest-numbered mailbox with the highest priority.
+ *
+ * Two counters (head and tail) track the next mailbox to transmit and the
+ * echo buffer of the mailbox already transmitted. The queue is stopped when
+ * all mailboxes are busy or when a priority value roll-over occurs.
+ */
+static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	u32 mbxno, mbx_mask, data;
+	unsigned long flags;
+
+	mbxno = get_tx_head_mb(priv);
+	mbx_mask = BIT(mbxno);
+	spin_lock_irqsave(&priv->mbx_lock, flags);
+	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
+		spin_unlock_irqrestore(&priv->mbx_lock, flags);
+		netif_stop_queue(ndev);
+		dev_err(priv->ndev->dev.parent,
+			"BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
+			priv->tx_head, priv->tx_tail);
+		return NETDEV_TX_BUSY;
+	}
+	spin_unlock_irqrestore(&priv->mbx_lock, flags);
+
+	/* Prepare mailbox for transmission */
+	data = min_t(u8, cf->can_dlc, 8);
+	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
+		data |= HECC_CANMCF_RTR;
+	data |= get_tx_head_prio(priv) << 8;
+	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
+
+	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
+		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
+	else /* Standard frame format */
+		data = (cf->can_id & CAN_SFF_MASK) << 18;
+	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
+	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
+		be32_to_cpu(*(u32 *)(cf->data)));
+	if (cf->can_dlc > 4)
+		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
+			be32_to_cpu(*(u32 *)(cf->data + 4)));
+	else
+		*(u32 *)(cf->data + 4) = 0;
+	can_put_echo_skb(skb, ndev, mbxno);
+
+	spin_lock_irqsave(&priv->mbx_lock, flags);
+	--priv->tx_head;
+	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
+		(priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
+		netif_stop_queue(ndev);
+	}
+	hecc_set_bit(priv, HECC_CANME, mbx_mask);
+	spin_unlock_irqrestore(&priv->mbx_lock, flags);
+
+	hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
+	hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
+	hecc_write(priv, HECC_CANTRS, mbx_mask);
+
+	return NETDEV_TX_OK;
+}
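
To make the head/tail bookkeeping above concrete, a hedged trace (not from the patch) of how
successive tx_head values map to mailbox number and priority with the default
HECC_MB_TX_SHIFT of 2:

	/*
	 *   tx_head (low byte)   mailbox (head & HECC_TX_MB_MASK)   priority field
	 *   0xff                 3                                   0x3f
	 *   0xfe                 2                                   0x3f
	 *   0xfd                 1                                   0x3f
	 *   0xfc                 0                                   0x3f
	 *   0xfb                 3                                   0x3e
	 *   ...
	 * The queue stops once (tx_head & HECC_TX_MASK) wraps back to HECC_TX_MASK,
	 * i.e. when the priority field has rolled over.
	 */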
+
+static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
+{
+	struct net_device_stats *stats = &priv->ndev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u32 data, mbx_mask;
+	unsigned long flags;
+
+	skb = alloc_can_skb(priv->ndev, &cf);
+	if (!skb) {
+		if (printk_ratelimit())
+			dev_err(priv->ndev->dev.parent,
+				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
+		return -ENOMEM;
+	}
+
+	mbx_mask = BIT(mbxno);
+	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
+	if (data & HECC_CANMID_IDE)
+		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (data >> 18) & CAN_SFF_MASK;
+	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
+	if (data & HECC_CANMCF_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	cf->can_dlc = data & 0xF;
+	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
+	*(u32 *)(cf->data) = cpu_to_be32(data);
+	if (cf->can_dlc > 4) {
+		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
+		*(u32 *)(cf->data + 4) = cpu_to_be32(data);
+	} else {
+		*(u32 *)(cf->data + 4) = 0;
+	}
+	spin_lock_irqsave(&priv->mbx_lock, flags);
+	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
+	hecc_write(priv, HECC_CANRMP, mbx_mask);
+	/* enable mailbox only if it is part of rx buffer mailboxes */
+	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
+		hecc_set_bit(priv, HECC_CANME, mbx_mask);
+	spin_unlock_irqrestore(&priv->mbx_lock, flags);
+
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+	stats->rx_packets++;
+
+	return 0;
+}
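
A hedged note on the byte order handled by the mailbox data accesses above (the example bytes
are illustrative): CANMDL/CANMDH hold CAN data byte 0 in the most significant byte, so the
payload is treated as big-endian 32-bit words in both directions.

	/*
	 * e.g. a received CANMDL value of 0x11223344 is stored back into the frame
	 * as cf->data[0..3] = {0x11, 0x22, 0x33, 0x44} by the cpu_to_be32() above,
	 * and ti_hecc_xmit() writes the same byte sequence out as 0x11223344 via
	 * be32_to_cpu().
	 */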
+
+/*
+ * ti_hecc_rx_poll - HECC receive pkts
+ *
+ * The receive mailboxes run from the highest-numbered mailbox down to the
+ * first mailbox above the transmit ones. On reception the hardware places a
+ * CAN frame into the highest-numbered mailbox whose CAN ID filter matches.
+ * Since all receive mailboxes carry the same filter (accept all CAN frames),
+ * packets arrive in the highest available RX mailbox and we need to ensure
+ * in-order packet reception.
+ *
+ * To receive packets in the right order we logically divide the RX mailboxes
+ * into a main bank and a buffer bank. Packets are received, in mailbox
+ * priority order (higher to lower), into the main bank; once it is full,
+ * further reception into the main bank is disabled. While the main mailboxes
+ * are processed in NAPI, new packets are received in the buffer mailboxes.
+ *
+ * An "RX next" mailbox counter tracks processing. Once all main-bank packets
+ * have been passed to the upper stack we re-enable the whole main bank, but
+ * continue to process packets received in the buffer mailboxes. Each buffer
+ * mailbox is re-enabled immediately after its packet is processed, so it can
+ * absorb overflow from the higher mailboxes.
+ */
+static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *ndev = napi->dev;
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	u32 num_pkts = 0;
+	u32 mbx_mask;
+	unsigned long pending_pkts, flags;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
+		num_pkts < quota) {
+		mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
+		if (mbx_mask & pending_pkts) {
+			if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
+				return num_pkts;
+			++num_pkts;
+		} else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
+			break; /* pkt not received yet */
+		}
+		--priv->rx_next;
+		if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
+			/* enable high bank mailboxes */
+			spin_lock_irqsave(&priv->mbx_lock, flags);
+			mbx_mask = hecc_read(priv, HECC_CANME);
+			mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
+			hecc_write(priv, HECC_CANME, mbx_mask);
+			spin_unlock_irqrestore(&priv->mbx_lock, flags);
+		} else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
+			priv->rx_next = HECC_RX_FIRST_MBOX;
+			break;
+		}
+	}
+
+	/* Enable packet interrupt if all pkts are handled */
+	if (hecc_read(priv, HECC_CANRMP) == 0) {
+		napi_complete(napi);
+		/* Re-enable RX mailbox interrupts */
+		mbx_mask = hecc_read(priv, HECC_CANMIM);
+		mbx_mask |= HECC_TX_MBOX_MASK;
+		hecc_write(priv, HECC_CANMIM, mbx_mask);
+	}
+
+	return num_pkts;
+}
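
A hedged summary (for the default 4-TX/28-RX split, not part of the patch) of the traversal
implemented above:

	/*
	 * rx_next walks downwards from mailbox 31. In the main bank (above
	 * HECC_RX_BUFFER_MBOX) it stops at the first mailbox with no pending
	 * frame, preserving ordering. Once it decrements to HECC_RX_BUFFER_MBOX
	 * (12) the whole main bank is re-enabled, and it keeps walking through
	 * the buffer mailboxes; after the last one it is reset to
	 * HECC_RX_FIRST_MBOX (31).
	 */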
+
+static int ti_hecc_error(struct net_device *ndev, int int_status,
+	int err_status)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(ndev, &cf);
+	if (!skb) {
+		if (printk_ratelimit())
+			dev_err(priv->ndev->dev.parent,
+				"ti_hecc_error: alloc_can_err_skb() failed\n");
+		return -ENOMEM;
+	}
+
+	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
+		if ((int_status & HECC_CANGIF_BOIF) == 0) {
+			priv->can.state = CAN_STATE_ERROR_WARNING;
+			++priv->can.can_stats.error_warning;
+			cf->can_id |= CAN_ERR_CRTL;
+			if (hecc_read(priv, HECC_CANTEC) > 96)
+				cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+			if (hecc_read(priv, HECC_CANREC) > 96)
+				cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+		}
+		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
+		dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
+		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+	}
+
+	if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
+		if ((int_status & HECC_CANGIF_BOIF) == 0) {
+			priv->can.state = CAN_STATE_ERROR_PASSIVE;
+			++priv->can.can_stats.error_passive;
+			cf->can_id |= CAN_ERR_CRTL;
+			if (hecc_read(priv, HECC_CANTEC) > 127)
+				cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+			if (hecc_read(priv, HECC_CANREC) > 127)
+				cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+		}
+		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
+		dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
+		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+	}
+
+	/*
+	 * We also need to check the bus-off condition in the error status
+	 * register to ensure warning interrupts don't hog the system
+	 */
+	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
+		priv->can.state = CAN_STATE_BUS_OFF;
+		cf->can_id |= CAN_ERR_BUSOFF;
+		hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
+		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+		/* Disable all interrupts in bus-off to avoid int hog */
+		hecc_write(priv, HECC_CANGIM, 0);
+		can_bus_off(ndev);
+	}
+
+	if (err_status & HECC_BUS_ERROR) {
+		++priv->can.can_stats.bus_error;
+		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+		if (err_status & HECC_CANES_FE) {
+			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
+			cf->data[2] |= CAN_ERR_PROT_FORM;
+		}
+		if (err_status & HECC_CANES_BE) {
+			hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
+			cf->data[2] |= CAN_ERR_PROT_BIT;
+		}
+		if (err_status & HECC_CANES_SE) {
+			hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
+			cf->data[2] |= CAN_ERR_PROT_STUFF;
+		}
+		if (err_status & HECC_CANES_CRCE) {
+			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+					CAN_ERR_PROT_LOC_CRC_DEL;
+		}
+		if (err_status & HECC_CANES_ACKE) {
+			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+			cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+					CAN_ERR_PROT_LOC_ACK_DEL;
+		}
+	}
+
+	netif_receive_skb(skb);
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	return 0;
+}
+
+static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = (struct net_device *)dev_id;
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	u32 mbxno, mbx_mask, int_status, err_status;
+	unsigned long ack, flags;
+
+	int_status = hecc_read(priv,
+		(priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);
+
+	if (!int_status)
+		return IRQ_NONE;
+
+	err_status = hecc_read(priv, HECC_CANES);
+	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
+		HECC_CANES_EP | HECC_CANES_EW))
+			ti_hecc_error(ndev, int_status, err_status);
+
+	if (int_status & HECC_CANGIF_GMIF) {
+		while (priv->tx_tail - priv->tx_head > 0) {
+			mbxno = get_tx_tail_mb(priv);
+			mbx_mask = BIT(mbxno);
+			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
+				break;
+			hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
+			hecc_write(priv, HECC_CANTA, mbx_mask);
+			spin_lock_irqsave(&priv->mbx_lock, flags);
+			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
+			spin_unlock_irqrestore(&priv->mbx_lock, flags);
+			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
+						HECC_CANMCF) & 0xF;
+			stats->tx_packets++;
+			can_get_echo_skb(ndev, mbxno);
+			--priv->tx_tail;
+		}
+
+		/* restart queue if wrap-up or if queue stalled on last pkt */
+		if (((priv->tx_head == priv->tx_tail) &&
+		((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
+		(((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
+		((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
+			netif_wake_queue(ndev);
+
+		/* Disable RX mailbox interrupts and let NAPI reenable them */
+		if (hecc_read(priv, HECC_CANRMP)) {
+			ack = hecc_read(priv, HECC_CANMIM);
+			ack &= BIT(HECC_MAX_TX_MBOX) - 1;
+			hecc_write(priv, HECC_CANMIM, ack);
+			napi_schedule(&priv->napi);
+		}
+	}
+
+	/* clear all interrupt conditions - read back to avoid spurious ints */
+	if (priv->int_line) {
+		hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
+		int_status = hecc_read(priv, HECC_CANGIF1);
+	} else {
+		hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
+		int_status = hecc_read(priv, HECC_CANGIF0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ti_hecc_open(struct net_device *ndev)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+	int err;
+
+	err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
+			ndev->name, ndev);
+	if (err) {
+		dev_err(ndev->dev.parent, "error requesting interrupt\n");
+		return err;
+	}
+
+	/* Open common can device */
+	err = open_candev(ndev);
+	if (err) {
+		dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
+		free_irq(ndev->irq, ndev);
+		return err;
+	}
+
+	clk_enable(priv->clk);
+	ti_hecc_start(ndev);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+
+	return 0;
+}
+
+static int ti_hecc_close(struct net_device *ndev)
+{
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	napi_disable(&priv->napi);
+	ti_hecc_stop(ndev);
+	free_irq(ndev->irq, ndev);
+	clk_disable(priv->clk);
+	close_candev(ndev);
+
+	return 0;
+}
+
+static const struct net_device_ops ti_hecc_netdev_ops = {
+	.ndo_open		= ti_hecc_open,
+	.ndo_stop		= ti_hecc_close,
+	.ndo_start_xmit		= ti_hecc_xmit,
+};
+
+static int ti_hecc_probe(struct platform_device *pdev)
+{
+	struct net_device *ndev = NULL;
+	struct ti_hecc_priv *priv;
+	struct ti_hecc_platform_data *pdata;
+	struct resource *mem, *irq;
+	void __iomem *addr;
+	int err = -ENODEV;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		goto probe_exit;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "No mem resources\n");
+		goto probe_exit;
+	}
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "No irq resource\n");
+		goto probe_exit;
+	}
+	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+		dev_err(&pdev->dev, "HECC region already claimed\n");
+		err = -EBUSY;
+		goto probe_exit;
+	}
+	addr = ioremap(mem->start, resource_size(mem));
+	if (!addr) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -ENOMEM;
+		goto probe_exit_free_region;
+	}
+
+	ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
+	if (!ndev) {
+		dev_err(&pdev->dev, "alloc_candev failed\n");
+		err = -ENOMEM;
+		goto probe_exit_iounmap;
+	}
+
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+	priv->base = addr;
+	priv->scc_ram_offset = pdata->scc_ram_offset;
+	priv->hecc_ram_offset = pdata->hecc_ram_offset;
+	priv->mbx_offset = pdata->mbx_offset;
+	priv->int_line = pdata->int_line;
+
+	priv->can.bittiming_const = &ti_hecc_bittiming_const;
+	priv->can.do_set_mode = ti_hecc_do_set_mode;
+	priv->can.do_get_state = ti_hecc_get_state;
+
+	ndev->irq = irq->start;
+	ndev->flags |= IFF_ECHO;
+	platform_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ndev->netdev_ops = &ti_hecc_netdev_ops;
+
+	priv->clk = clk_get(&pdev->dev, "hecc_ck");
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "No clock available\n");
+		err = PTR_ERR(priv->clk);
+		priv->clk = NULL;
+		goto probe_exit_candev;
+	}
+	priv->can.clock.freq = clk_get_rate(priv->clk);
+	netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
+		HECC_DEF_NAPI_WEIGHT);
+
+	err = register_candev(ndev);
+	if (err) {
+		dev_err(&pdev->dev, "register_candev() failed\n");
+		goto probe_exit_clk;
+	}
+	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+		priv->base, (u32) ndev->irq);
+
+	return 0;
+
+probe_exit_clk:
+	clk_put(priv->clk);
+probe_exit_candev:
+	free_candev(ndev);
+probe_exit_iounmap:
+	iounmap(addr);
+probe_exit_free_region:
+	release_mem_region(mem->start, resource_size(mem));
+probe_exit:
+	return err;
+}
+
+static int __devexit ti_hecc_remove(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+	clk_put(priv->clk);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	iounmap(priv->base);
+	release_mem_region(res->start, resource_size(res));
+	unregister_candev(ndev);
+	free_candev(ndev);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* TI HECC netdevice driver: platform driver structure */
+static struct platform_driver ti_hecc_driver = {
+	.driver = {
+		.name    = DRV_NAME,
+		.owner   = THIS_MODULE,
+	},
+	.probe = ti_hecc_probe,
+	.remove = __devexit_p(ti_hecc_remove),
+};
+
+static int __init ti_hecc_init_driver(void)
+{
+	printk(KERN_INFO DRV_DESC "\n");
+	return platform_driver_register(&ti_hecc_driver);
+}
+module_init(ti_hecc_init_driver);
+
+static void __exit ti_hecc_exit_driver(void)
+{
+	printk(KERN_INFO DRV_DESC " unloaded\n");
+	platform_driver_unregister(&ti_hecc_driver);
+}
+module_exit(ti_hecc_exit_driver);
+
+MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DRV_DESC);
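
Since the driver binds by platform device name, a hedged sketch of the matching board-side
registration (the base address, IRQ number and device/variable names are illustrative
assumptions; the pdata is the example from the header comment):

	static struct resource example_hecc_resources[] = {
		{
			.start	= 0x5c050000,			/* illustrative */
			.end	= 0x5c050000 + 0x4000 - 1,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 24,				/* illustrative */
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_hecc_device = {
		.name		= "ti_hecc",			/* must match DRV_NAME */
		.id		= -1,
		.dev		= {
			.platform_data = &am3517_evm_hecc_pdata,
		},
		.num_resources	= ARRAY_SIZE(example_hecc_resources),
		.resource	= example_hecc_resources,
	};

	/* board init code would then call platform_device_register(&example_hecc_device) */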
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index abdbd9c2b788..591eb0eb1c2b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -232,7 +232,7 @@ MODULE_DEVICE_TABLE(usb, ems_usb_table);
 #define INTR_IN_BUFFER_SIZE 4
 
 #define MAX_RX_URBS 10
-#define MAX_TX_URBS CAN_ECHO_SKB_MAX
+#define MAX_TX_URBS 10
 
 struct ems_usb;
 
@@ -311,23 +311,19 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
 	int i;
 	struct net_device_stats *stats = &dev->netdev->stats;
 
-	skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+	skb = alloc_can_skb(dev->netdev, &cf);
 	if (skb == NULL)
 		return;
 
-	skb->protocol = htons(ETH_P_CAN);
-
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-
 	cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
 	cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
 
-	if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME
-	    || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
+	if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
+	    msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
 		cf->can_id |= CAN_EFF_FLAG;
 
-	if (msg->type == CPC_MSG_TYPE_RTR_FRAME
-	    || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
+	if (msg->type == CPC_MSG_TYPE_RTR_FRAME ||
+	    msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
 		cf->can_id |= CAN_RTR_FLAG;
 	} else {
 		for (i = 0; i < cf->can_dlc; i++)
@@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
 	struct sk_buff *skb;
 	struct net_device_stats *stats = &dev->netdev->stats;
 
-	skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev->netdev, &cf);
 	if (skb == NULL)
 		return;
 
-	skb->protocol = htons(ETH_P_CAN);
-
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-
-	cf->can_id = CAN_ERR_FLAG;
-	cf->can_dlc = CAN_ERR_DLC;
-
 	if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
 		u8 state = msg->msg.can_state;
 
@@ -1015,7 +1003,7 @@ static int ems_usb_probe(struct usb_interface *intf,
 	struct ems_usb *dev;
 	int i, err = -ENOMEM;
 
-	netdev = alloc_candev(sizeof(struct ems_usb));
+	netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
 	if (!netdev) {
 		dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
 		return -ENOMEM;
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 3bf1b04f2cab..d4c6e7fcff53 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -33,10 +33,16 @@
 #include <net/route.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
 #include <scsi/iscsi_if.h>
 
 #include "cnic_if.h"
 #include "bnx2.h"
+#include "bnx2x_reg.h"
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_hsi.h"
+#include "../scsi/bnx2i/57xx_iscsi_constants.h"
+#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
 #include "cnic.h"
 #include "cnic_defs.h"
 
@@ -59,6 +65,7 @@ static DEFINE_MUTEX(cnic_lock);
 static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
 
 static int cnic_service_bnx2(void *, void *);
+static int cnic_service_bnx2x(void *, void *);
 static int cnic_ctl(void *, struct cnic_ctl_info *);
 
 static struct cnic_ops cnic_bnx2_ops = {
@@ -67,9 +74,14 @@ static struct cnic_ops cnic_bnx2_ops = {
 	.cnic_ctl	= cnic_ctl,
 };
 
-static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
-static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
-static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
+static struct cnic_ops cnic_bnx2x_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2x,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static void cnic_shutdown_rings(struct cnic_dev *);
+static void cnic_init_rings(struct cnic_dev *);
 static int cnic_cm_set_pg(struct cnic_sock *);
 
 static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
@@ -83,10 +95,16 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 	if (cp->uio_dev != -1)
 		return -EBUSY;
 
+	rtnl_lock();
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+
 	cp->uio_dev = iminor(inode);
 
-	cnic_init_bnx2_tx_ring(dev);
-	cnic_init_bnx2_rx_ring(dev);
+	cnic_init_rings(dev);
+	rtnl_unlock();
 
 	return 0;
 }
@@ -96,7 +114,7 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 	struct cnic_dev *dev = uinfo->priv;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	cnic_shutdown_bnx2_rx_ring(dev);
+	cnic_shutdown_rings(dev);
 
 	cp->uio_dev = -1;
 	return 0;
@@ -162,6 +180,36 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 	ethdev->drv_ctl(dev->netdev, &info);
 }
 
+static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
+	io->offset = off;
+	io->dma_addr = addr;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_l2_ring *ring = &info.data.ring;
+
+	if (start)
+		info.cmd = DRV_CTL_START_L2_CMD;
+	else
+		info.cmd = DRV_CTL_STOP_L2_CMD;
+
+	ring->cid = cid;
+	ring->client_id = cl_id;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
 static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -204,6 +252,19 @@ static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
 	ethdev->drv_ctl(dev->netdev, &info);
 }
 
+static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
+{
+	u32 i;
+
+	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+		if (cp->ctx_tbl[i].cid == cid) {
+			*l5_cid = i;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
 static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 			   struct cnic_sock *csk)
 {
@@ -347,7 +408,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 {
 	struct cnic_dev *dev;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -393,7 +454,7 @@ int cnic_unregister_driver(int ulp_type)
 	struct cnic_ulp_ops *ulp_ops;
 	int i = 0;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -449,7 +510,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_ulp_ops *ulp_ops;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -490,7 +551,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	struct cnic_local *cp = dev->cnic_priv;
 	int i = 0;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -606,14 +667,14 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 
 	for (i = 0; i < dma->num_pages; i++) {
 		if (dma->pg_arr[i]) {
-			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
-					    dma->pg_arr[i], dma->pg_map_arr[i]);
+			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
+					  dma->pg_arr[i], dma->pg_map_arr[i]);
 			dma->pg_arr[i] = NULL;
 		}
 	}
 	if (dma->pgtbl) {
-		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
-				    dma->pgtbl, dma->pgtbl_map);
+		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+				  dma->pgtbl, dma->pgtbl_map);
 		dma->pgtbl = NULL;
 	}
 	kfree(dma->pg_arr);
@@ -635,6 +696,20 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 	}
 }
 
+static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	u32 *page_table = dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each entry needs to be in little endian format. */
+		*page_table = dma->pg_map_arr[i] & 0xffffffff;
+		page_table++;
+		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+	}
+}
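
A hedged note on the layout produced above (the address is illustrative): each 64-bit DMA
address becomes two consecutive 32-bit words, low word first, which is the little-endian
entry format the comment refers to.

	/*
	 * e.g. pg_map_arr[i] == 0x0000000123456000 is laid out as:
	 *   page_table[2 * i]     = 0x23456000;	(low 32 bits)
	 *   page_table[2 * i + 1] = 0x00000001;	(high 32 bits)
	 */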
+
 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 			  int pages, int use_pg_tbl)
 {
@@ -650,9 +725,10 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 	dma->num_pages = pages;
 
 	for (i = 0; i < pages; i++) {
-		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
-						      BCM_PAGE_SIZE,
-						      &dma->pg_map_arr[i]);
+		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
+						    BCM_PAGE_SIZE,
+						    &dma->pg_map_arr[i],
+						    GFP_ATOMIC);
 		if (dma->pg_arr[i] == NULL)
 			goto error;
 	}
@@ -661,8 +737,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 
 	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
 			  ~(BCM_PAGE_SIZE - 1);
-	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
-					  &dma->pgtbl_map);
+	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+					&dma->pgtbl_map, GFP_ATOMIC);
 	if (dma->pgtbl == NULL)
 		goto error;
 
@@ -675,6 +751,21 @@ error:
 	return -ENOMEM;
 }
 
+static void cnic_free_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		if (cp->ctx_arr[i].ctx) {
+			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+					  cp->ctx_arr[i].ctx,
+					  cp->ctx_arr[i].mapping);
+			cp->ctx_arr[i].ctx = NULL;
+		}
+	}
+}
+
 static void cnic_free_resc(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -691,25 +782,18 @@ static void cnic_free_resc(struct cnic_dev *dev)
 	}
 
 	if (cp->l2_buf) {
-		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
-				    cp->l2_buf, cp->l2_buf_map);
+		dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
+				  cp->l2_buf, cp->l2_buf_map);
 		cp->l2_buf = NULL;
 	}
 
 	if (cp->l2_ring) {
-		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
-				    cp->l2_ring, cp->l2_ring_map);
+		dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
+				  cp->l2_ring, cp->l2_ring_map);
 		cp->l2_ring = NULL;
 	}
 
-	for (i = 0; i < cp->ctx_blks; i++) {
-		if (cp->ctx_arr[i].ctx) {
-			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
-					    cp->ctx_arr[i].ctx,
-					    cp->ctx_arr[i].mapping);
-			cp->ctx_arr[i].ctx = NULL;
-		}
-	}
+	cnic_free_context(dev);
 	kfree(cp->ctx_arr);
 	cp->ctx_arr = NULL;
 	cp->ctx_blks = 0;
@@ -717,6 +801,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
 	cnic_free_dma(dev, &cp->gbl_buf_info);
 	cnic_free_dma(dev, &cp->conn_buf_info);
 	cnic_free_dma(dev, &cp->kwq_info);
+	cnic_free_dma(dev, &cp->kwq_16_data_info);
 	cnic_free_dma(dev, &cp->kcq_info);
 	kfree(cp->iscsi_tbl);
 	cp->iscsi_tbl = NULL;
@@ -765,8 +850,10 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 
 		for (i = 0; i < cp->ctx_blks; i++) {
 			cp->ctx_arr[i].ctx =
-				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
-						     &cp->ctx_arr[i].mapping);
+				dma_alloc_coherent(&dev->pcidev->dev,
+						   BCM_PAGE_SIZE,
+						   &cp->ctx_arr[i].mapping,
+						   GFP_KERNEL);
 			if (cp->ctx_arr[i].ctx == NULL)
 				return -ENOMEM;
 		}
@@ -779,15 +866,17 @@ static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
 	struct cnic_local *cp = dev->cnic_priv;
 
 	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
-	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
-					   &cp->l2_ring_map);
+	cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
+					 &cp->l2_ring_map,
+					 GFP_KERNEL | __GFP_COMP);
 	if (!cp->l2_ring)
 		return -ENOMEM;
 
 	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
 	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
-	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
-					   &cp->l2_buf_map);
+	cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
+					&cp->l2_buf_map,
+					GFP_KERNEL | __GFP_COMP);
 	if (!cp->l2_buf)
 		return -ENOMEM;
 
@@ -808,14 +897,20 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
 	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
 	uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
-	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
 		else
 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
 
 		uinfo->name = "bnx2_cnic";
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
+			PAGE_MASK;
+		uinfo->mem[1].size = sizeof(struct host_def_status_block);
+
+		uinfo->name = "bnx2x_cnic";
 	}
 
 	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
@@ -880,6 +975,152 @@ error:
 	return ret;
 }
 
+static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int ctx_blk_size = cp->ethdev->ctx_blk_size;
+	int total_mem, blks, i, cid_space;
+
+	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
+		return -EINVAL;
+
+	cid_space = MAX_ISCSI_TBL_SZ +
+		    (BNX2X_ISCSI_START_CID - ethdev->starting_cid);
+
+	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
+	blks = total_mem / ctx_blk_size;
+	if (total_mem % ctx_blk_size)
+		blks++;
+
+	if (blks > cp->ethdev->ctx_tbl_len)
+		return -ENOMEM;
+
+	cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
+	if (cp->ctx_arr == NULL)
+		return -ENOMEM;
+
+	cp->ctx_blks = blks;
+	cp->ctx_blk_size = ctx_blk_size;
+	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
+		cp->ctx_align = 0;
+	else
+		cp->ctx_align = ctx_blk_size;
+
+	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
+
+	for (i = 0; i < blks; i++) {
+		cp->ctx_arr[i].ctx =
+			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+					   &cp->ctx_arr[i].mapping,
+					   GFP_KERNEL);
+		if (cp->ctx_arr[i].ctx == NULL)
+			return -ENOMEM;
+
+		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
+			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
+				cnic_free_context(dev);
+				cp->ctx_blk_size += cp->ctx_align;
+				i = -1;
+				continue;
+			}
+		}
+	}
+	return 0;
+}
+
+static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, j, n, ret, pages;
+	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
+
+	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
+				GFP_KERNEL);
+	if (!cp->iscsi_tbl)
+		goto error;
+
+	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
+				  MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
+	if (!cp->ctx_tbl)
+		goto error;
+
+	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
+	}
+
+	pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
+		PAGE_SIZE;
+
+	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
+	if (ret)
+		return -ENOMEM;
+
+	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
+
+		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
+		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
+						   off;
+
+		if ((i % n) == (n - 1))
+			j++;
+	}
+
+	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
+	if (ret)
+		goto error;
+	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
+
+	for (i = 0; i < KCQ_PAGE_CNT; i++) {
+		struct bnx2x_bd_chain_next *next =
+			(struct bnx2x_bd_chain_next *)
+			&cp->kcq[i][MAX_KCQE_CNT];
+		int j = i + 1;
+
+		if (j >= KCQ_PAGE_CNT)
+			j = 0;
+		next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
+		next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
+	}
+
+	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
+			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_bnx2x_context(dev);
+	if (ret)
+		goto error;
+
+	cp->bnx2x_status_blk = cp->status_blk;
+	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+
+	cp->l2_rx_ring_size = 15;
+
+	ret = cnic_alloc_l2_rings(dev, 4);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_uio(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return -ENOMEM;
+}
+
 static inline u32 cnic_kwq_avail(struct cnic_local *cp)
 {
 	return cp->max_kwq_idx -
@@ -921,6 +1162,880 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
 	return 0;
 }
 
+static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
+				   union l5cm_specific_data *l5_data)
+{
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	dma_addr_t map;
+
+	map = ctx->kwqe_data_mapping;
+	l5_data->phy_address.lo = (u64) map & 0xffffffff;
+	l5_data->phy_address.hi = (u64) map >> 32;
+	return ctx->kwqe_data;
+}
+
+static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
+				u32 type, union l5cm_specific_data *l5_data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l5cm_spe kwqe;
+	struct kwqe_16 *kwq[1];
+	int ret;
+
+	kwqe.hdr.conn_and_cmd_data =
+		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
+			     BNX2X_HW_CID(cid, cp->func)));
+	kwqe.hdr.type = cpu_to_le16(type);
+	kwqe.hdr.reserved = 0;
+	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
+	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
+
+	kwq[0] = (struct kwqe_16 *) &kwqe;
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+
+	if (ret == 1)
+		return 0;
+
+	return -EBUSY;
+}
+
+static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
+				   struct kcqe *cqes[], u32 num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (likely(ulp_ops)) {
+		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+					  cqes, num_cqes);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
+	int func = cp->func, pages;
+	int hq_bds;
+
+	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
+	cp->num_ccells = req1->num_ccells_per_conn;
+	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
+			      cp->num_iscsi_tasks;
+	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
+			BNX2X_ISCSI_R2TQE_SIZE;
+	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
+	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	cp->num_cqs = req1->num_cqs;
+
+	if (!dev->max_iscsi_conn)
+		return 0;
+
+	/* init Tstorm RAM */
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
+		  req1->rq_num_wqes);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+		  req1->num_tasks_per_conn);
+
+	/* init Ustorm RAM */
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
+		  req1->rq_buffer_size);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
+		  req1->rq_num_wqes);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
+		  req1->cq_num_wqes);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
+		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+	/* init Xstorm RAM */
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
+		  hq_bds);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
+		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+	/* init Cstorm RAM */
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
+		  req1->cq_num_wqes);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
+		  hq_bds);
+
+	return 0;
+}
+
+static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
+	struct cnic_local *cp = dev->cnic_priv;
+	int func = cp->func;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	if (!dev->max_iscsi_conn) {
+		kcqe.completion_status =
+			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
+		goto done;
+	}
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
+		req2->error_bit_map[1]);
+
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
+		req2->error_bit_map[1]);
+
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
+
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+
+done:
+	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+	return 0;
+}
+
+static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
+		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+		cnic_free_dma(dev, &iscsi->hq_info);
+		cnic_free_dma(dev, &iscsi->r2tq_info);
+		cnic_free_dma(dev, &iscsi->task_array_info);
+	}
+	cnic_free_id(&cp->cid_tbl, ctx->cid);
+	ctx->cid = 0;
+}
+
+static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+	u32 cid;
+	int ret, pages;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+	cid = cnic_alloc_new_id(&cp->cid_tbl);
+	if (cid == -1) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ctx->cid = cid;
+	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+
+	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_bnx2x_conn_resc(dev, l5_cid);
+	return ret;
+}
+
+static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
+				struct regpair *ctx_addr)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
+	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
+	unsigned long align_off = 0;
+	dma_addr_t ctx_map;
+	void *ctx;
+
+	if (cp->ctx_align) {
+		unsigned long mask = cp->ctx_align - 1;
+
+		if (cp->ctx_arr[blk].mapping & mask)
+			align_off = cp->ctx_align -
+				    (cp->ctx_arr[blk].mapping & mask);
+	}
+	ctx_map = cp->ctx_arr[blk].mapping + align_off +
+		(off * BNX2X_CONTEXT_MEM_SIZE);
+	ctx = cp->ctx_arr[blk].ctx + align_off +
+	      (off * BNX2X_CONTEXT_MEM_SIZE);
+	if (init)
+		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
+
+	ctx_addr->lo = ctx_map & 0xffffffff;
+	ctx_addr->hi = (u64) ctx_map >> 32;
+	return ctx;
+}
+
+static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_offload1 *req1 =
+			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
+	struct iscsi_kwqe_conn_offload2 *req2 =
+			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
+	struct iscsi_kwqe_conn_offload3 *req3;
+	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
+	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+	u32 cid = ctx->cid;
+	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
+	struct iscsi_context *ictx;
+	struct regpair context_addr;
+	int i, j, n = 2, n_max;
+
+	ctx->ctx_flags = 0;
+	if (!req2->num_additional_wqes)
+		return -EINVAL;
+
+	n_max = req2->num_additional_wqes + 2;
+
+	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
+	if (ictx == NULL)
+		return -ENOMEM;
+
+	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+
+	ictx->xstorm_ag_context.hq_prod = 1;
+
+	ictx->xstorm_st_context.iscsi.first_burst_length =
+		ISCSI_DEF_FIRST_BURST_LEN;
+	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
+		ISCSI_DEF_MAX_RECV_SEG_LEN;
+	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
+		req1->sq_page_table_addr_lo;
+	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
+		req1->sq_page_table_addr_hi;
+	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
+	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
+	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
+		iscsi->hq_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
+		(u64) iscsi->hq_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
+		iscsi->hq_info.pgtbl[0];
+	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
+		iscsi->hq_info.pgtbl[1];
+	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
+		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
+		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
+		iscsi->r2tq_info.pgtbl[0];
+	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
+		iscsi->r2tq_info.pgtbl[1];
+	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
+		BNX2X_ISCSI_PBL_NOT_CACHED;
+	ictx->xstorm_st_context.iscsi.flags.flags |=
+		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
+	ictx->xstorm_st_context.iscsi.flags.flags |=
+		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+
+	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
+	/* TSTORM requires the base address of the RQ DB, not the PTE */
+	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
+		req2->rq_page_table_addr_lo & PAGE_MASK;
+	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
+		req2->rq_page_table_addr_hi;
+	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
+	ictx->tstorm_st_context.tcp.flags2 |=
+		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
+
+	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
+
+	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
+		req2->rq_page_table_addr_lo;
+	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
+		req2->rq_page_table_addr_hi;
+	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
+	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
+	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
+		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
+		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
+	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
+		iscsi->r2tq_info.pgtbl[0];
+	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
+		iscsi->r2tq_info.pgtbl[1];
+	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
+		req1->cq_page_table_addr_lo;
+	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
+		req1->cq_page_table_addr_hi;
+	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
+	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
+	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
+	ictx->ustorm_st_context.task_pbe_cache_index =
+		BNX2X_ISCSI_PBL_NOT_CACHED;
+	ictx->ustorm_st_context.task_pdu_cache_index =
+		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
+
+	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
+		if (j == 3) {
+			if (n >= n_max)
+				break;
+			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+			j = 0;
+		}
+		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
+		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
+			req3->qp_first_pte[j].hi;
+		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
+			req3->qp_first_pte[j].lo;
+	}
+
+	ictx->ustorm_st_context.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->ustorm_st_context.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	ictx->ustorm_st_context.tce_phy_addr.lo =
+		iscsi->task_array_info.pgtbl[0];
+	ictx->ustorm_st_context.tce_phy_addr.hi =
+		iscsi->task_array_info.pgtbl[1];
+	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
+	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
+	ictx->ustorm_st_context.negotiated_rx_and_flags |=
+		ISCSI_DEF_MAX_BURST_LEN;
+	ictx->ustorm_st_context.negotiated_rx |=
+		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
+		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
+
+	ictx->cstorm_st_context.hq_pbl_base.lo =
+		iscsi->hq_info.pgtbl_map & 0xffffffff;
+	ictx->cstorm_st_context.hq_pbl_base.hi =
+		(u64) iscsi->hq_info.pgtbl_map >> 32;
+	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
+	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
+	ictx->cstorm_st_context.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->cstorm_st_context.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	/* CSTORM and USTORM initialization differs; CSTORM requires the
+	 * CQ DB base address, not the PTE address */
+	ictx->cstorm_st_context.cq_db_base.lo =
+		req1->cq_page_table_addr_lo & PAGE_MASK;
+	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
+	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
+	for (i = 0; i < cp->num_cqs; i++) {
+		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
+			ISCSI_INITIAL_SN;
+		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
+			ISCSI_INITIAL_SN;
+	}
+
+	ictx->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+				       ISCSI_CONNECTION_TYPE);
+	ictx->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+				       ISCSI_CONNECTION_TYPE);
+	return 0;
+
+}
+
+static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num, int *work)
+{
+	struct iscsi_kwqe_conn_offload1 *req1;
+	struct iscsi_kwqe_conn_offload2 *req2;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+	u32 l5_cid;
+	int ret;
+
+	if (num < 2) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
+	if ((num - 2) < req2->num_additional_wqes) {
+		*work = num;
+		return -EINVAL;
+	}
+	*work = 2 + req2->num_additional_wqes;
+
+	l5_cid = req1->iscsi_conn_id;
+	if (l5_cid >= MAX_ISCSI_TBL_SZ)
+		return -EINVAL;
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.iscsi_conn_id = l5_cid;
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
+		atomic_dec(&cp->iscsi_conn);
+		ret = 0;
+		goto done;
+	}
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		atomic_dec(&cp->iscsi_conn);
+		ret = 0;
+		goto done;
+	}
+	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
+	if (ret < 0) {
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+		atomic_dec(&cp->iscsi_conn);
+		goto done;
+	}
+
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
+						  cp->func);
+
+done:
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+	return ret;
+}
+
+
+static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_update *req =
+		(struct iscsi_kwqe_conn_update *) kwqe;
+	void *data;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
+	int ret;
+
+	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
+		return -EINVAL;
+
+	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!data)
+		return -ENOMEM;
+
+	memcpy(data, kwqe, sizeof(struct kwqe));
+
+	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_destroy *req =
+		(struct iscsi_kwqe_conn_destroy *) kwqe;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid = req->reserved0;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	int ret = 0;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
+		goto skip_cfc_delete;
+
+	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
+		msleep(250);
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
+				  req->context_id,
+				  ETH_CONNECTION_TYPE |
+				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
+				  &l5_data);
+	if (ret == 0)
+		wait_event(ctx->waitq, ctx->wait_cond);
+
+skip_cfc_delete:
+	cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	atomic_dec(&cp->iscsi_conn);
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.iscsi_conn_id = l5_cid;
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+	kcqe.iscsi_conn_context_id = req->context_id;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+	return ret;
+}
+
+static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
+				      struct l4_kwq_connect_req1 *kwqe1,
+				      struct l4_kwq_connect_req3 *kwqe3,
+				      struct l5cm_active_conn_buffer *conn_buf)
+{
+	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
+	struct l5cm_xstorm_conn_buffer *xstorm_buf =
+		&conn_buf->xstorm_conn_buffer;
+	struct l5cm_tstorm_conn_buffer *tstorm_buf =
+		&conn_buf->tstorm_conn_buffer;
+	struct regpair context_addr;
+	u32 cid = BNX2X_SW_CID(kwqe1->cid);
+	struct in6_addr src_ip, dst_ip;
+	int i;
+	u32 *addrp;
+
+	addrp = (u32 *) &conn_addr->local_ip_addr;
+	for (i = 0; i < 4; i++, addrp++)
+		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+	addrp = (u32 *) &conn_addr->remote_ip_addr;
+	for (i = 0; i < 4; i++, addrp++)
+		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
+
+	xstorm_buf->context_addr.hi = context_addr.hi;
+	xstorm_buf->context_addr.lo = context_addr.lo;
+	xstorm_buf->mss = 0xffff;
+	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
+	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
+		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
+	xstorm_buf->pseudo_header_checksum =
+		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
+
+	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
+		tstorm_buf->params |=
+			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
+	if (kwqe3->ka_timeout) {
+		tstorm_buf->ka_enable = 1;
+		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
+		tstorm_buf->ka_interval = kwqe3->ka_interval;
+		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
+	}
+	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
+	tstorm_buf->snd_buf = kwqe3->snd_buf;
+	tstorm_buf->max_rt_time = 0xffffffff;
+}
+
+static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int func = CNIC_FUNC(cp);
+	u8 *mac = dev->mac_addr;
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);
+
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
+		 mac[4]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
+		 mac[2]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
+		 mac[1]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
+		 mac[0]);
+}
+
+static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+	u16 tstorm_flags = 0;
+
+	if (tcp_ts) {
+		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+	}
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);
+
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
+}
+
+static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
+			      u32 num, int *work)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kwq_connect_req1 *kwqe1 =
+		(struct l4_kwq_connect_req1 *) wqes[0];
+	struct l4_kwq_connect_req3 *kwqe3;
+	struct l5cm_active_conn_buffer *conn_buf;
+	struct l5cm_conn_addr_params *conn_addr;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid = kwqe1->pg_cid;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	int ret;
+
+	if (num < 2) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
+		*work = 3;
+	else
+		*work = 2;
+
+	if (num < *work) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
+		printk(KERN_ERR PFX "%s: conn_buf size too big\n",
+			       dev->netdev->name);
+		return -ENOMEM;
+	}
+	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!conn_buf)
+		return -ENOMEM;
+
+	memset(conn_buf, 0, sizeof(*conn_buf));
+
+	conn_addr = &conn_buf->conn_addr_buf;
+	conn_addr->remote_addr_0 = csk->ha[0];
+	conn_addr->remote_addr_1 = csk->ha[1];
+	conn_addr->remote_addr_2 = csk->ha[2];
+	conn_addr->remote_addr_3 = csk->ha[3];
+	conn_addr->remote_addr_4 = csk->ha[4];
+	conn_addr->remote_addr_5 = csk->ha[5];
+
+	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
+		struct l4_kwq_connect_req2 *kwqe2 =
+			(struct l4_kwq_connect_req2 *) wqes[1];
+
+		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
+		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
+		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
+
+		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
+		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
+		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
+		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
+	}
+	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
+
+	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
+	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
+	conn_addr->local_tcp_port = kwqe1->src_port;
+	conn_addr->remote_tcp_port = kwqe1->dst_port;
+
+	conn_addr->pmtu = kwqe3->pmtu;
+	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
+
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);
+
+	cnic_bnx2x_set_tcp_timestamp(dev,
+		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
+			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		ctx->ctx_flags |= CTX_FL_OFFLD_START;
+
+	return ret;
+}
+
+static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
+	union l5cm_specific_data l5_data;
+	int ret;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
+			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
+	union l5cm_specific_data l5_data;
+	int ret;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
+			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
+	struct l4_kcq kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.pg_host_opaque = req->host_opaque;
+	kcqe.pg_cid = req->host_opaque;
+	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+	return 0;
+}
+
+static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
+	struct l4_kcq kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.pg_host_opaque = req->pg_host_opaque;
+	kcqe.pg_cid = req->pg_cid;
+	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+	return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num_wqes)
+{
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case ISCSI_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_INIT2:
+			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
+						     num_wqes - i, &work);
+			break;
+		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
+			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_CONNECT1:
+			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
+						 &work);
+			break;
+		case L4_KWQE_OPCODE_VALUE_CLOSE:
+			ret = cnic_bnx2x_close(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_RESET:
+			ret = cnic_bnx2x_reset(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
+			ret = cnic_bnx2x_offload_pg(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
+			ret = cnic_bnx2x_update_pg(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
+			ret = 0;
+			break;
+		default:
+			ret = 0;
+			printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n",
+			       dev->netdev->name, opcode);
+			break;
+		}
+		if (ret < 0)
+			printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n",
+			       dev->netdev->name, opcode);
+		i += work;
+	}
+	return 0;
+}
+
 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -987,6 +2102,22 @@ static u16 cnic_bnx2_hw_idx(u16 idx)
 	return idx;
 }
 
+static u16 cnic_bnx2x_next_idx(u16 idx)
+{
+	idx++;
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+
+	return idx;
+}
+
+static u16 cnic_bnx2x_hw_idx(u16 idx)
+{
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+	return idx;
+}
+
 static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -1012,7 +2143,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
 	return last_cnt;
 }
 
-static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
+static void cnic_chk_pkt_rings(struct cnic_local *cp)
 {
 	u16 rx_cons = *cp->rx_cons_ptr;
 	u16 tx_cons = *cp->tx_cons_ptr;
@@ -1020,6 +2151,7 @@ static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
 	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
 		cp->tx_cons = tx_cons;
 		cp->rx_cons = rx_cons;
+
 		uio_event_notify(cp->cnic_uinfo);
 	}
 }
@@ -1062,7 +2194,7 @@ done:
 
 	cp->kcq_prod_idx = sw_prod;
 
-	cnic_chk_bnx2_pkt_rings(cp);
+	cnic_chk_pkt_rings(cp);
 	return status_idx;
 }
 
@@ -1100,7 +2232,7 @@ done:
 	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
 	cp->kcq_prod_idx = sw_prod;
 
-	cnic_chk_bnx2_pkt_rings(cp);
+	cnic_chk_pkt_rings(cp);
 
 	cp->last_status_idx = status_idx;
 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
@@ -1125,6 +2257,91 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
+static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
+				      u16 index, u8 op, u8 update)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+		       COMMAND_REG_INT_ACK);
+	struct igu_ack_register igu_ack;
+
+	igu_ack.status_block_index = index;
+	igu_ack.sb_id_and_flags =
+			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
+}
+
+static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
+			   IGU_INT_DISABLE, 0);
+}
+
+static void cnic_service_bnx2x_bh(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 hw_prod, sw_prod;
+	struct cstorm_status_block_c *sblk =
+		&cp->bnx2x_status_blk->c_status_block;
+	u32 status_idx = sblk->status_block_index;
+	int kcqe_cnt;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return;
+
+	hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
+	hw_prod = cp->hw_idx(hw_prod);
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that sblk fields can change. */
+		barrier();
+		if (status_idx == sblk->status_block_index)
+			break;
+
+		status_idx = sblk->status_block_index;
+		hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
+		hw_prod = cp->hw_idx(hw_prod);
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
+	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
+			   status_idx, IGU_INT_ENABLE, 1);
+
+	cp->kcq_prod_idx = sw_prod;
+	return;
+}
+
+static int cnic_service_bnx2x(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
+
+	prefetch(cp->status_blk);
+	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		tasklet_schedule(&cp->cnic_irq_task);
+
+	cnic_chk_pkt_rings(cp);
+
+	return 0;
+}
+
 static void cnic_ulp_stop(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -1197,6 +2414,19 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 
 		cnic_put(dev);
 		break;
+	case CNIC_CTL_COMPLETION_CMD: {
+		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
+		u32 l5_cid;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
+			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+			ctx->wait_cond = 1;
+			wake_up(&ctx->waitq);
+		}
+		break;
+	}
 	default:
 		return -EINVAL;
 	}
@@ -1872,6 +3102,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 		/* fall through */
 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
 		cp->close_conn(csk, opcode);
 		break;
 
@@ -1957,6 +3189,76 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
 	return 0;
 }
 
+static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
+	union l5cm_specific_data l5_data;
+	u32 cmd = 0;
+	int close_complete = 0;
+
+	switch (opcode) {
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+		if (cnic_ready_to_close(csk, opcode))
+			cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+		break;
+	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+		break;
+	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+		close_complete = 1;
+		break;
+	}
+	if (cmd) {
+		memset(&l5_data, 0, sizeof(l5_data));
+
+		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
+				    &l5_data);
+	} else if (close_complete) {
+		ctx->timestamp = jiffies;
+		cnic_close_conn(csk);
+		cnic_cm_upcall(cp, csk, csk->state);
+	}
+}
+
+static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int func = CNIC_FUNC(cp);
+
+	cnic_init_bnx2x_mac(dev);
+	cnic_bnx2x_set_tcp_timestamp(dev, 1);
+
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);
+
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
+		DEF_MAX_DA_COUNT);
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
+		DEF_MAX_CWND);
+	return 0;
+}
+
 static int cnic_cm_open(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2091,7 +3393,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
 
 		cp->bnx2_status_blk = cp->status_blk;
 		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
-		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
+		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
 			     (unsigned long) dev);
 		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
 				  "cnic", dev);
@@ -2464,6 +3766,426 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 	return 0;
 }
 
+static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 start_offset = ethdev->ctx_tbl_offset;
+	int i;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		struct cnic_ctx *ctx = &cp->ctx_arr[i];
+		dma_addr_t map = ctx->mapping;
+
+		if (cp->ctx_align) {
+			unsigned long mask = cp->ctx_align - 1;
+
+			map = (map + mask) & ~mask;
+		}
+
+		cnic_ctx_tbl_wr(dev, start_offset + i, map);
+	}
+}
+
+static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err = 0;
+
+	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
+		     (unsigned long) dev);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
+				  "cnic", dev);
+		if (err)
+			tasklet_disable(&cp->cnic_irq_task);
+	}
+	return err;
+}
+
+static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u8 sb_id = cp->status_blk_num;
+	int port = CNIC_PORT(cp);
+
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
+					       HC_INDEX_C_ISCSI_EQ_CONS),
+		 64 / 12);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
+						HC_INDEX_C_ISCSI_EQ_CONS), 0);
+}
+
+static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
+{
+}
+
+static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
+	struct eth_context *context;
+	struct regpair context_addr;
+	dma_addr_t buf_map;
+	int func = CNIC_FUNC(cp);
+	int port = CNIC_PORT(cp);
+	int i;
+	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+	u32 val;
+
+	memset(txbd, 0, BCM_PAGE_SIZE);
+
+	buf_map = cp->l2_buf_map;
+	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
+		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
+
+		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+		reg_bd->addr_hi = start_bd->addr_hi;
+		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
+		start_bd->nbytes = cpu_to_le16(0x10);
+		start_bd->nbd = cpu_to_le16(3);
+		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+		start_bd->general_data = (UNICAST_ADDRESS <<
+			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+
+	}
+	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);
+
+	val = (u64) cp->l2_ring_map >> 32;
+	txbd->next_bd.addr_hi = cpu_to_le32(val);
+
+	context->xstorm_st_context.tx_bd_page_base_hi = val;
+
+	val = (u64) cp->l2_ring_map & 0xffffffff;
+	txbd->next_bd.addr_lo = cpu_to_le32(val);
+
+	context->xstorm_st_context.tx_bd_page_base_lo = val;
+
+	context->cstorm_st_context.sb_index_number =
+		HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
+	context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;
+
+	context->xstorm_st_context.statistics_data = (cli |
+				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
+
+	context->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
+					CDU_REGION_NUMBER_XCM_AG,
+					ETH_CONNECTION_TYPE);
+
+	/* reset xstorm per client statistics */
+	val = BAR_XSTRORM_INTMEM +
+	      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
+	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
+		CNIC_WR(dev, val + i * 4, 0);
+
+	cp->tx_cons_ptr =
+		&cp->bnx2x_def_status_blk->c_def_status_block.index_values[
+			HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
+}
+
+static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
+				BCM_PAGE_SIZE);
+	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
+				(cp->l2_ring + (2 * BCM_PAGE_SIZE));
+	struct eth_context *context;
+	struct regpair context_addr;
+	int i;
+	int port = CNIC_PORT(cp);
+	int func = CNIC_FUNC(cp);
+	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+	u32 val;
+	struct tstorm_eth_client_config tstorm_client = {0};
+
+	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+	}
+	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);
+
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+	rxbd->addr_hi = cpu_to_le32(val);
+
+	context->ustorm_st_context.common.bd_page_base_hi = val;
+
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	rxbd->addr_lo = cpu_to_le32(val);
+
+	context->ustorm_st_context.common.bd_page_base_lo = val;
+
+	context->ustorm_st_context.common.sb_index_numbers =
+						BNX2X_ISCSI_RX_SB_INDEX_NUM;
+	context->ustorm_st_context.common.clientId = cli;
+	context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
+	context->ustorm_st_context.common.flags =
+		USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
+	context->ustorm_st_context.common.statistics_counter_id = cli;
+	context->ustorm_st_context.common.mc_alignment_log_size = 0;
+	context->ustorm_st_context.common.bd_buff_size =
+						cp->l2_single_buf_size;
+
+	context->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
+					CDU_REGION_NUMBER_UCM_AG,
+					ETH_CONNECTION_TYPE);
+
+	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
+	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
+	rxcqe->addr_hi = cpu_to_le32(val);
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);
+
+	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
+	rxcqe->addr_lo = cpu_to_le32(val);
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);
+
+	/* client tstorm info */
+	tstorm_client.mtu = cp->l2_single_buf_size - 14;
+	tstorm_client.config_flags =
+			(TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
+			TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
+	tstorm_client.statistics_counter_id = cli;
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		   TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
+		   ((u32 *)&tstorm_client)[0]);
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		   TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
+		   ((u32 *)&tstorm_client)[1]);
+
+	/* reset tstorm per client statistics */
+	val = BAR_TSTRORM_INTMEM +
+	      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
+	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
+		CNIC_WR(dev, val + i * 4, 0);
+
+	/* reset ustorm per client statistics */
+	val = BAR_USTRORM_INTMEM +
+	      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
+	for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
+		CNIC_WR(dev, val + i * 4, 0);
+
+	cp->rx_cons_ptr =
+		&cp->bnx2x_def_status_blk->u_def_status_block.index_values[
+			HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
+}
+
+static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 base, addr, val;
+	int port = CNIC_PORT(cp);
+
+	dev->max_iscsi_conn = 0;
+	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
+	if (base < 0xa0000 || base >= 0xc0000)
+		return;
+
+	addr = BNX2X_SHMEM_ADDR(base,
+		dev_info.port_hw_config[port].iscsi_mac_upper);
+
+	val = CNIC_RD(dev, addr);
+
+	dev->mac_addr[0] = (u8) (val >> 8);
+	dev->mac_addr[1] = (u8) val;
+
+	addr = BNX2X_SHMEM_ADDR(base,
+		dev_info.port_hw_config[port].iscsi_mac_lower);
+
+	val = CNIC_RD(dev, addr);
+
+	dev->mac_addr[2] = (u8) (val >> 24);
+	dev->mac_addr[3] = (u8) (val >> 16);
+	dev->mac_addr[4] = (u8) (val >> 8);
+	dev->mac_addr[5] = (u8) val;
+
+	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
+	val = CNIC_RD(dev, addr);
+
+	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
+		u16 val16;
+
+		addr = BNX2X_SHMEM_ADDR(base,
+				drv_lic_key[port].max_iscsi_init_conn);
+		val16 = CNIC_RD16(dev, addr);
+
+		if (val16)
+			val16 ^= 0x1e1e;
+		dev->max_iscsi_conn = val16;
+	}
+	if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
+		int func = CNIC_FUNC(cp);
+
+		addr = BNX2X_SHMEM_ADDR(base,
+				mf_cfg.func_mf_config[func].e1hov_tag);
+		val = CNIC_RD(dev, addr);
+		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+			addr = BNX2X_SHMEM_ADDR(base,
+				mf_cfg.func_mf_config[func].config);
+			val = CNIC_RD(dev, addr);
+			val &= FUNC_MF_CFG_PROTOCOL_MASK;
+			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
+				dev->max_iscsi_conn = 0;
+		}
+	}
+}
+
+static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int func = CNIC_FUNC(cp), ret, i;
+	int port = CNIC_PORT(cp);
+	u16 eq_idx;
+	u8 sb_id = cp->status_blk_num;
+
+	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
+			       BNX2X_ISCSI_START_CID);
+
+	if (ret)
+		return -ENOMEM;
+
+	cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
+			  CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
+	cp->kcq_prod_idx = 0;
+
+	cnic_get_bnx2x_iscsi_info(dev);
+
+	/* Only 1 EQ */
+	CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
+		cp->kcq_info.pg_map_arr[1] & 0xffffffff);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
+		(u64) cp->kcq_info.pg_map_arr[1] >> 32);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
+		cp->kcq_info.pg_map_arr[0] & 0xffffffff);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
+		(u64) cp->kcq_info.pg_map_arr[0] >> 32);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
+		HC_INDEX_C_ISCSI_EQ_CONS);
+
+	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
+		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
+			cp->conn_buf_info.pgtbl[2 * i]);
+		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
+			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
+	}
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
+		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
+		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
+
+	cnic_setup_bnx2x_context(dev);
+
+	eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
+			   CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
+			   offsetof(struct cstorm_status_block_c,
+				    index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
+	if (eq_idx != 0) {
+		printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n",
+		       dev->netdev->name, eq_idx);
+		return -EBUSY;
+	}
+	ret = cnic_init_bnx2x_irq(dev);
+	if (ret)
+		return ret;
+
+	cnic_init_bnx2x_tx_ring(dev);
+	cnic_init_bnx2x_rx_ring(dev);
+
+	return 0;
+}
+
+static void cnic_init_rings(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		cnic_init_bnx2_tx_ring(dev);
+		cnic_init_bnx2_rx_ring(dev);
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+		union l5cm_specific_data l5_data;
+		struct ustorm_eth_rx_producers rx_prods = {0};
+		u32 off, i;
+
+		rx_prods.bd_prod = 0;
+		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
+		barrier();
+
+		off = BAR_USTRORM_INTMEM +
+			USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);
+
+		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
+			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+
+		cnic_init_bnx2x_tx_ring(dev);
+		cnic_init_bnx2x_rx_ring(dev);
+
+		l5_data.phy_address.lo = cli;
+		l5_data.phy_address.hi = 0;
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
+	}
+}
+
+static void cnic_shutdown_rings(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		cnic_shutdown_bnx2_rx_ring(dev);
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+		union l5cm_specific_data l5_data;
+
+		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
+
+		l5_data.phy_address.lo = cli;
+		l5_data.phy_address.hi = 0;
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
+			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+		msleep(10);
+	}
+}
+
 static int cnic_register_netdev(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2554,6 +4276,22 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 	cnic_free_resc(dev);
 }
 
+
+static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u8 sb_id = cp->status_blk_num;
+	int port = CNIC_PORT(cp);
+
+	cnic_free_irq(dev);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
+		  offsetof(struct cstorm_status_block_c,
+			   index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
+		  0);
+	cnic_free_resc(dev);
+}
+
 static void cnic_stop_hw(struct cnic_dev *dev)
 {
 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
@@ -2685,6 +4423,57 @@ cnic_err:
 	return NULL;
 }
 
+static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct cnic_eth_dev *ethdev = NULL;
+	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
+
+	probe = symbol_get(bnx2x_cnic_probe);
+	if (probe) {
+		ethdev = (*probe)(dev);
+		symbol_put(bnx2x_cnic_probe);
+	}
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL) {
+		dev_put(dev);
+		return NULL;
+	}
+
+	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+
+	cp->cnic_ops = &cnic_bnx2x_ops;
+	cp->start_hw = cnic_start_bnx2x_hw;
+	cp->stop_hw = cnic_stop_bnx2x_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl_le;
+	cp->alloc_resc = cnic_alloc_bnx2x_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2x_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
+	cp->enable_int = cnic_enable_bnx2x_int;
+	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
+	cp->ack_int = cnic_ack_bnx2x_msix;
+	cp->close_conn = cnic_close_bnx2x_conn;
+	cp->next_idx = cnic_bnx2x_next_idx;
+	cp->hw_idx = cnic_bnx2x_hw_idx;
+	return cdev;
+}
+
 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
 {
 	struct ethtool_drvinfo drvinfo;
@@ -2696,6 +4485,8 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev)
 
 		if (!strcmp(drvinfo.driver, "bnx2"))
 			cdev = init_bnx2_cnic(dev);
+		if (!strcmp(drvinfo.driver, "bnx2x"))
+			cdev = init_bnx2x_cnic(dev);
 		if (cdev) {
 			write_lock(&cnic_dev_lock);
 			list_add(&cdev->list, &cnic_dev_list);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a94b302bb464..241d09acc0d4 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -227,6 +227,7 @@ struct cnic_local {
 	void				*status_blk;
 	struct status_block_msix	*bnx2_status_blk;
 	struct host_status_block	*bnx2x_status_blk;
+	struct host_def_status_block	*bnx2x_def_status_blk;
 
 	u32				status_blk_num;
 	u32				int_num;
@@ -258,6 +259,7 @@ struct cnic_local {
 	struct cnic_ctx		*ctx_arr;
 	int			ctx_blks;
 	int			ctx_blk_size;
+	unsigned long		ctx_align;
 	int			cids_per_blk;
 
 	u32			chip_id;
@@ -290,11 +292,73 @@ struct bnx2x_bd_chain_next {
 	u8	reserved[8];
 };
 
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T 	(1)
+
 #define ISCSI_RAMROD_CMD_ID_UPDATE_CONN		(ISCSI_KCQE_OPCODE_UPDATE_CONN)
 #define ISCSI_RAMROD_CMD_ID_INIT		(ISCSI_KCQE_OPCODE_INIT)
 
 #define CDU_REGION_NUMBER_XCM_AG 2
 #define CDU_REGION_NUMBER_UCM_AG 4
 
+#define CDU_VALID_DATA(_cid, _region, _type)	\
+	(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+
+#define CDU_CRC8(_cid, _region, _type)	\
+	(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)	\
+	(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+
+#define BNX2X_CONTEXT_MEM_SIZE		1024
+#define BNX2X_FCOE_CID			16
+
+/* iSCSI client IDs are 17, 19, 21, 23 */
+#define BNX2X_ISCSI_BASE_CL_ID		17
+#define BNX2X_ISCSI_CL_ID(vn)		(BNX2X_ISCSI_BASE_CL_ID + ((vn) << 1))
+
+#define BNX2X_ISCSI_L2_CID		17
+#define BNX2X_ISCSI_START_CID		18
+#define BNX2X_ISCSI_NUM_CONNECTIONS	128
+#define BNX2X_ISCSI_TASK_CONTEXT_SIZE	128
+#define BNX2X_ISCSI_MAX_PENDING_R2TS	4
+#define BNX2X_ISCSI_R2TQE_SIZE		8
+#define BNX2X_ISCSI_HQ_BD_SIZE		64
+#define BNX2X_ISCSI_CONN_BUF_SIZE	64
+#define BNX2X_ISCSI_GLB_BUF_SIZE	64
+#define BNX2X_ISCSI_PBL_NOT_CACHED	0xff
+#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED	0xff
+#define BNX2X_HW_CID(x, func)		((x) | (((func) % PORT_MAX) << 23) | \
+					 (((func) >> 1) << 17))
+#define BNX2X_SW_CID(x)			(x & 0x1ffff)
+#define BNX2X_CHIP_NUM_57711		0x164f
+#define BNX2X_CHIP_NUM_57711E		0x1650
+#define BNX2X_CHIP_NUM(x)		(x >> 16)
+#define BNX2X_CHIP_IS_57711(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
+#define BNX2X_CHIP_IS_57711E(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
+#define BNX2X_CHIP_IS_E1H(x)		\
+	(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
+#define IS_E1H_OFFSET       		BNX2X_CHIP_IS_E1H(cp->chip_id)
+
+#define BNX2X_RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define BNX2X_MAX_RX_DESC_CNT		(BNX2X_RX_DESC_CNT - 2)
+#define BNX2X_RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define BNX2X_MAX_RCQ_DESC_CNT		(BNX2X_RCQ_DESC_CNT - 1)
+
+#define BNX2X_DEF_SB_ID			16
+
+#define BNX2X_ISCSI_RX_SB_INDEX_NUM					\
+		((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
+		  USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
+		 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
+
+#define BNX2X_SHMEM_ADDR(base, field)	(base + \
+					 offsetof(struct shmem_region, field))
+
+#define CNIC_PORT(cp)			((cp)->func % PORT_MAX)
+#define CNIC_FUNC(cp)			((cp)->func)
+#define CNIC_E1HVN(cp)			((cp)->func >> 1)
+
 #endif
 
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index cee80f694457..9827b278dc7c 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -51,6 +51,9 @@
 #define L4_KCQE_COMPLETION_STATUS_SUCCESS		    (0)
 #define L4_KCQE_COMPLETION_STATUS_TIMEOUT        (0x93)
 
+#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
+#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG   (0x89)
+
 #define L4_LAYER_CODE (4)
 #define L2_LAYER_CODE (2)
 
@@ -577,4 +580,1918 @@ struct l4_kwq_upload {
 	u32 reserved2[6];
 };
 
+/*
+ * bnx2x structures
+ */
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_rq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_r2tq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_cq_db {
+#if defined(__BIG_ENDIAN)
+	u16 cq_sn;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cq_sn;
+#endif
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct rings_db {
+	struct ustorm_iscsi_rq_db rq;
+	struct ustorm_iscsi_r2tq_db r2tq;
+	struct ustorm_iscsi_cq_db cq[8];
+#if defined(__BIG_ENDIAN)
+	u16 rq_prod;
+	u16 r2tq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 r2tq_prod;
+	u16 rq_prod;
+#endif
+	struct regpair cq_pbl_base;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_placement_db {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+	u32 local_sge_0_address_hi;
+	u32 local_sge_0_address_lo;
+#if defined(__BIG_ENDIAN)
+	u16 curr_sge_offset;
+	u16 local_sge_0_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_0_size;
+	u16 curr_sge_offset;
+#endif
+	u32 local_sge_1_address_hi;
+	u32 local_sge_1_address_lo;
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 local_sge_1_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_1_size;
+	u16 reserved6;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 local_sge_index_2b;
+	u16 reserved7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved7;
+	u8 local_sge_index_2b;
+	u8 sgl_size;
+#endif
+	u32 rem_pdu;
+	u32 place_db_bitfield_1;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
+	u32 place_db_bitfield_2;
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
+	u32 nal;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B (0x3<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B_SHIFT 24
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0x7<<26)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 26
+#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B (0x7<<29)
+#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B_SHIFT 29
+};
+
+/*
+ * Ustorm iSCSI Storm Context
+ */
+struct ustorm_iscsi_st_context {
+	u32 exp_stat_sn;
+	u32 exp_data_sn;
+	struct rings_db ring;
+	struct regpair task_pbl_base;
+	struct regpair tce_phy_addr;
+	struct ustorm_iscsi_placement_db place_db;
+	u32 data_rcv_seq;
+	u32 rem_rcv_len;
+#if defined(__BIG_ENDIAN)
+	u16 hdr_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hdr_itt;
+#endif
+	u32 nal_bytes;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_second_byte_union;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
+	u8 task_pdu_cache_index;
+	u8 task_pbe_cache_index;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbe_cache_index;
+	u8 task_pdu_cache_index;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
+	u8 hdr_second_byte_union;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 acDecrement;
+#elif defined(__LITTLE_ENDIAN)
+	u8 acDecrement;
+	u8 reserved2;
+	u16 reserved3;
+#endif
+	u32 task_stat;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_opcode;
+	u8 num_cqs;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 num_cqs;
+	u8 hdr_opcode;
+#endif
+	u32 negotiated_rx;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
+	u32 negotiated_rx_and_flags;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
+};
+
+/*
+ * TCP context region, shared in TOE, RDMA and ISCSI
+ */
+struct tstorm_tcp_st_context_section {
+	u32 flags1;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
+#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31
+	u32 flags2;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 reserved_slowpath;
+	u8 tcp_sm_state_3b;
+	u8 rto_exp_3b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rto_exp_3b;
+	u8 tcp_sm_state_3b;
+	u16 reserved_slowpath;
+#endif
+	u32 rcv_nxt;
+	u32 timestamp_recent;
+	u32 timestamp_recent_time;
+	u32 cwnd;
+	u32 ss_thresh;
+	u32 cwnd_accum;
+	u32 prev_seg_seq;
+	u32 expected_rel_seq;
+	u32 recover;
+#if defined(__BIG_ENDIAN)
+	u8 retransmit_count;
+	u8 ka_max_probe_count;
+	u8 persist_probe_count;
+	u8 ka_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_probe_count;
+	u8 persist_probe_count;
+	u8 ka_max_probe_count;
+	u8 retransmit_count;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 statistics_counter_id;
+	u8 ooo_support_mode;
+	u8 snd_wnd_scale_4b;
+	u8 dup_ack_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 dup_ack_count;
+	u8 snd_wnd_scale_4b;
+	u8 ooo_support_mode;
+	u8 statistics_counter_id;
+#endif
+	u32 retransmit_start_time;
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 isle_start_seq;
+	u32 isle_end_seq;
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u16 recent_seg_wnd;
+#elif defined(__LITTLE_ENDIAN)
+	u16 recent_seg_wnd;
+	u16 mss;
+#endif
+	u32 reserved4;
+	u32 max_rt_time;
+#if defined(__BIG_ENDIAN)
+	u16 lsb_mac_address;
+	u16 vlan_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 vlan_id;
+	u16 lsb_mac_address;
+#endif
+	u32 msb_mac_address;
+	u32 reserved2;
+};
+
+/*
+ * Termination variables
+ */
+struct iscsi_term_vars {
+	u8 BitMap;
+#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
+#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define ISCSI_TERM_VARS_RSRV (0x1<<7)
+#define ISCSI_TERM_VARS_RSRV_SHIFT 7
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct tstorm_iscsi_st_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 rem_tcp_data_len;
+	u16 brb_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 brb_offset;
+	u16 rem_tcp_data_len;
+#endif
+	u32 b2nh;
+#if defined(__BIG_ENDIAN)
+	u16 rq_cons;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
+	u8 hdr_bytes_2_fetch;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hdr_bytes_2_fetch;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
+	u16 rq_cons;
+#endif
+	struct regpair rq_db_phy_addr;
+#if defined(__BIG_ENDIAN)
+	struct iscsi_term_vars term_vars;
+	u8 scratchpad_idx;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u8 scratchpad_idx;
+	struct iscsi_term_vars term_vars;
+#endif
+	u32 reserved2;
+};
+
+/*
+ * The iSCSI non-aggregative context of Tstorm
+ */
+struct tstorm_iscsi_st_context {
+	struct tstorm_tcp_st_context_section tcp;
+	struct tstorm_iscsi_st_context_section iscsi;
+};
+
+/*
+ * The tcp aggregative context section of Xstorm
+ */
+struct xstorm_tcp_tcp_ag_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars1;
+	u8 __da_cnt;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u8 __da_cnt;
+	u8 __tcp_agg_vars1;
+#endif
+	u32 snd_nxt;
+	u32 tx_wnd;
+	u32 snd_una;
+	u32 local_adv_wnd;
+#if defined(__BIG_ENDIAN)
+	u8 __agg_val8_th;
+	u8 __agg_val8;
+	u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+	u8 __agg_val8;
+	u8 __agg_val8_th;
+#endif
+	u32 ack_to_far_end;
+	u32 rto_timer;
+	u32 ka_timer;
+	u32 ts_to_echo;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val7_th;
+	u16 __agg_val7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val7;
+	u16 __agg_val7_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars5;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars3;
+	u8 __force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __force_pure_ack_cnt;
+	u8 __tcp_agg_vars3;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars5;
+#endif
+	u32 tcp_agg_vars6;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc6;
+	u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __tcp_agg_vars7;
+	u16 __agg_misc6;
+#endif
+	u32 __agg_val10;
+	u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved3;
+	u8 __reserved2;
+	u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __da_only_cnt;
+	u8 __reserved2;
+	u16 __reserved3;
+#endif
+};
+
+/*
+ * The iscsi aggregative context of Xstorm
+ */
+struct xstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 agg_vars4;
+#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
+	u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
+	u8 agg_vars4;
+#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+	u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_cons;
+	u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+	struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __agg_val11;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val11;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#endif
+	u32 agg_vars8;
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 r2tq_prod;
+	u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_prod;
+	u16 r2tq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 hq_cons_tcp_seq;
+	u32 exp_stat_sn;
+	u32 agg_misc5;
+};
+
+/*
+ * The tcp aggregative context section of Tstorm
+ */
+struct tstorm_tcp_tcp_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 snd_nxt;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 snd_una;
+	u32 __reserved2;
+};
+
+/*
+ * The iscsi aggregative context of Tstorm
+ */
+struct tstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
+#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
+#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_tcp_tcp_ag_context_section tcp;
+};
+
+/*
+ * The iscsi aggregative context of Cstorm
+ */
+struct cstorm_iscsi_ag_context {
+	u32 agg_vars1;
+#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
+#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
+#if defined(__BIG_ENDIAN)
+	u8 __aux1_th;
+	u8 __aux1_val;
+	u16 __agg_vars2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_vars2;
+	u8 __aux1_val;
+	u8 __aux1_th;
+#endif
+	u32 rel_seq;
+	u32 rel_seq_th;
+#if defined(__BIG_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved62;
+	u8 __reserved61;
+	u8 __reserved60;
+	u8 __reserved59;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __reserved59;
+	u8 __reserved60;
+	u8 __reserved61;
+	u8 __reserved62;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved64;
+	u16 __cq_u_prod0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_u_prod0;
+	u16 __reserved64;
+#endif
+	u32 __cq_u_prod1;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_vars3;
+	u16 __cq_u_prod2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_u_prod2;
+	u16 __agg_vars3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __aux2_th;
+	u16 __cq_u_prod3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_u_prod3;
+	u16 __aux2_th;
+#endif
+};
+
+/*
+ * The iscsi aggregative context of Ustorm
+ */
+struct ustorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 __cq_local_comp_itt_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_local_comp_itt_val;
+	u8 agg_misc2;
+	u8 cdu_usage;
+#endif
+	u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
+#endif
+	u32 agg_val1;
+	u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
+#endif
+};
+
+/*
+ * Timers connection context
+ */
+struct iscsi_timers_block_context {
+	u32 __reserved_0;
+	u32 __reserved_1;
+	u32 __reserved_2;
+	u32 flags;
+#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+/*
+ * Ethernet context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_eth_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u8 local_addr_0;
+	u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_1;
+	u8 local_addr_0;
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved_vlan_type;
+	u16 params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+	u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 local_addr_2;
+	u8 local_addr_3;
+	u8 local_addr_4;
+	u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_5;
+	u8 local_addr_4;
+	u8 local_addr_3;
+	u8 local_addr_2;
+#endif
+};
+
+/*
+ * IpV4 context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_ip_v4_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_id;
+	u16 __pbf_hdr_cmd_rsvd_flags_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_flags_offset;
+	u16 __pbf_hdr_cmd_rsvd_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+	u8 tos;
+	u16 __pbf_hdr_cmd_rsvd_length;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_length;
+	u8 tos;
+	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+#endif
+	u32 ip_local_addr;
+#if defined(__BIG_ENDIAN)
+	u8 ttl;
+	u8 __pbf_hdr_cmd_rsvd_protocol;
+	u16 __pbf_hdr_cmd_rsvd_csum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_csum;
+	u8 __pbf_hdr_cmd_rsvd_protocol;
+	u8 ttl;
+#endif
+	u32 __pbf_hdr_cmd_rsvd_1;
+	u32 ip_remote_addr;
+};
+
+/*
+ * context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_padded_ip_v4_context_section {
+	struct xstorm_ip_v4_context_section ip_v4;
+	u32 reserved1[4];
+};
+
+/*
+ * IpV6 context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_ip_v6_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 pbf_hdr_cmd_rsvd_payload_len;
+	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+	u8 hop_limit;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hop_limit;
+	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+	u16 pbf_hdr_cmd_rsvd_payload_len;
+#endif
+	u32 priority_flow_label;
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0)
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20)
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28)
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28
+	u32 ip_local_addr_lo_hi;
+	u32 ip_local_addr_lo_lo;
+	u32 ip_local_addr_hi_hi;
+	u32 ip_local_addr_hi_lo;
+	u32 ip_remote_addr_lo_hi;
+	u32 ip_remote_addr_lo_lo;
+	u32 ip_remote_addr_hi_hi;
+	u32 ip_remote_addr_hi_lo;
+};
+
+union xstorm_ip_context_section_types {
+	struct xstorm_padded_ip_v4_context_section padded_ip_v4;
+	struct xstorm_ip_v6_context_section ip_v6;
+};
+
+/*
+ * TCP context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_tcp_context_section {
+	u32 snd_max;
+#if defined(__BIG_ENDIAN)
+	u16 remote_port;
+	u16 local_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_port;
+	u16 remote_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 original_nagle_1b;
+	u8 ts_enabled_1b;
+	u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+	u8 ts_enabled_1b;
+	u8 original_nagle_1b;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pseudo_csum;
+	u16 window_scaling_factor;
+#elif defined(__LITTLE_ENDIAN)
+	u16 window_scaling_factor;
+	u16 pseudo_csum;
+#endif
+	u32 reserved2;
+	u32 ts_time_diff;
+	u32 __next_timer_expir;
+};
+
+/*
+ * Common context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_common_context_section {
+	struct xstorm_eth_context_section ethernet;
+	union xstorm_ip_context_section_types ip_union;
+	struct xstorm_tcp_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 reserved;
+	u8 statistics_params;
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
+#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
+#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
+#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
+	u8 ip_version_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ip_version_1b;
+	u8 statistics_params;
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
+#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
+#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
+#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
+	u16 reserved;
+#endif
+};
+
+/*
+ * Flags used in ISCSI context section
+ */
+struct xstorm_iscsi_context_flags {
+	u8 flags;
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7
+};
+
+struct iscsi_task_context_entry_x {
+	u32 data_out_buffer_offset;
+	u32 itt;
+	u32 data_sn;
+};
+
+struct iscsi_task_context_entry_xuc_x_write_only {
+	u32 tx_r2t_sn;
+};
+
+struct iscsi_task_context_entry_xuc_xu_write_both {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 sge_index;
+	u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_offset;
+	u8 sge_index;
+	u8 sgl_size;
+#endif
+};
+
+/*
+ * iSCSI context section
+ */
+struct xstorm_iscsi_context_section {
+	u32 first_burst_length;
+	u32 max_send_pdu_length;
+	struct regpair sq_pbl_base;
+	struct regpair sq_curr_pbe;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe_base;
+	struct regpair r2tq_pbl_base;
+	struct regpair r2tq_curr_pbe_base;
+	struct regpair task_pbl_base;
+#if defined(__BIG_ENDIAN)
+	u16 data_out_count;
+	struct xstorm_iscsi_context_flags flags;
+	u8 task_pbl_cache_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbl_cache_idx;
+	struct xstorm_iscsi_context_flags flags;
+	u16 data_out_count;
+#endif
+	u32 seq_more_2_send;
+	u32 pdu_more_2_send;
+	struct iscsi_task_context_entry_x temp_tce_x;
+	struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr;
+	struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr;
+	struct regpair lun;
+	u32 exp_data_transfer_len_ttt;
+	u32 pdu_data_2_rxmit;
+	u32 rxmit_bytes_2_dr;
+#if defined(__BIG_ENDIAN)
+	u16 rxmit_sge_offset;
+	u16 hq_rxmit_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_rxmit_cons;
+	u16 rxmit_sge_offset;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 r2tq_cons;
+	u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+	u8 rxmit_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rxmit_sge_idx;
+	u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+	u16 r2tq_cons;
+#endif
+	u32 hq_rxmit_tcp_seq;
+};
+
+/*
+ * Xstorm iSCSI Storm Context
+ */
+struct xstorm_iscsi_st_context {
+	struct xstorm_common_context_section common;
+	struct xstorm_iscsi_context_section iscsi;
+};
+
+/*
+ * CQ DB CQ producer and pending completion counter
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
+#if defined(__BIG_ENDIAN)
+	u16 cntr;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cntr;
+#endif
+};
+
+/*
+ * CQ DB pending completion ITT array
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
+};
+
+/*
+ * Cstorm CQ sequence to notify array, updated by driver
+ */
+struct iscsi_cq_db_sqn_2_notify_arr {
+	u16 sqn[8];
+};
+
+/*
+ * Cstorm iSCSI Storm Context
+ */
+struct cstorm_iscsi_st_context {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe;
+	struct regpair task_pbl_base;
+	struct regpair cq_db_base;
+#if defined(__BIG_ENDIAN)
+	u16 hq_bd_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hq_bd_itt;
+#endif
+	u32 hq_bd_data_segment_len;
+	u32 hq_bd_buffer_offset;
+#if defined(__BIG_ENDIAN)
+	u8 timer_entry_idx;
+	u8 cq_proc_en_bit_map;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 hq_bd_opcode;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hq_bd_opcode;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 cq_proc_en_bit_map;
+	u8 timer_entry_idx;
+#endif
+	u32 hq_tcp_seq;
+#if defined(__BIG_ENDIAN)
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+#endif
+	struct regpair rsrv1;
+};
+
+/*
+ * Iscsi connection context
+ */
+struct iscsi_context {
+	struct ustorm_iscsi_st_context ustorm_st_context;
+	struct tstorm_iscsi_st_context tstorm_st_context;
+	struct xstorm_iscsi_ag_context xstorm_ag_context;
+	struct tstorm_iscsi_ag_context tstorm_ag_context;
+	struct cstorm_iscsi_ag_context cstorm_ag_context;
+	struct ustorm_iscsi_ag_context ustorm_ag_context;
+	struct iscsi_timers_block_context timers_context;
+	struct regpair upb_context;
+	struct xstorm_iscsi_st_context xstorm_st_context;
+	struct regpair xpb_context;
+	struct cstorm_iscsi_st_context cstorm_st_context;
+};
+
+/*
+ * Buffer per connection, used in Tstorm
+ */
+struct iscsi_conn_buf {
+	struct regpair reserved[8];
+};
+
+/*
+ * ipv6 structure
+ */
+struct ip_v6_addr {
+	u32 ip_addr_lo_lo;
+	u32 ip_addr_lo_hi;
+	u32 ip_addr_hi_lo;
+	u32 ip_addr_hi_hi;
+};
+
+/*
+ * l5cm- connection identification params
+ */
+struct l5cm_conn_addr_params {
+	u32 pmtu;
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+#endif
+	struct ip_v6_addr local_ip_addr;
+	struct ip_v6_addr remote_ip_addr;
+	u32 ipv6_flow_label_20b;
+	u32 reserved1;
+#if defined(__BIG_ENDIAN)
+	u16 remote_tcp_port;
+	u16 local_tcp_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_tcp_port;
+	u16 remote_tcp_port;
+#endif
+};
+
+/*
+ * l5cm-xstorm connection buffer
+ */
+struct l5cm_xstorm_conn_buffer {
+#if defined(__BIG_ENDIAN)
+	u16 rsrv1;
+	u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+	u16 rsrv1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u16 pseudo_header_checksum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pseudo_header_checksum;
+	u16 mss;
+#endif
+	u32 rcv_buf;
+	u32 rsrv2;
+	struct regpair context_addr;
+};
+
+/*
+ * l5cm-tstorm connection buffer
+ */
+struct l5cm_tstorm_conn_buffer {
+	u32 snd_buf;
+	u32 rcv_buf;
+#if defined(__BIG_ENDIAN)
+	u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+	u8 ka_max_probe_count;
+	u8 ka_enable;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_enable;
+	u8 ka_max_probe_count;
+	u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 max_rt_time;
+};
+
+/*
+ * l5cm connection buffer for active side
+ */
+struct l5cm_active_conn_buffer {
+	struct l5cm_conn_addr_params conn_addr_buf;
+	struct l5cm_xstorm_conn_buffer xstorm_conn_buffer;
+	struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
+};
+
+/*
+ * l5cm slow path element
+ */
+struct l5cm_packet_size {
+	u32 size;
+	u32 rsrv;
+};
+
+/*
+ * l5cm connection parameters
+ */
+union l5cm_reduce_param_union {
+	u32 passive_side_scramble_key;
+	u32 pcs_id;
+};
+
+/*
+ * l5cm connection parameters
+ */
+struct l5cm_reduce_conn {
+	union l5cm_reduce_param_union param;
+	u32 isn;
+};
+
+/*
+ * l5cm slow path element
+ */
+union l5cm_specific_data {
+	u8 protocol_data[8];
+	struct regpair phy_address;
+	struct l5cm_packet_size packet_size;
+	struct l5cm_reduce_conn reduced_conn;
+};
+
+/*
+ * l5 slow path element
+ */
+struct l5cm_spe {
+	struct spe_hdr hdr;
+	union l5cm_specific_data data;
+};
+
+/*
+ * Tstorm Tcp flags
+ */
+struct tstorm_l5cm_tcp_flags {
+	u16 flags;
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
+#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
+};
+
+/*
+ * Xstorm Tcp flags
+ */
+struct xstorm_l5cm_tcp_flags {
+	u8 flags;
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0)
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1)
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2)
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2
+#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3)
+#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
+};
+
 #endif /* CNIC_DEFS_H */
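[Editor's note, not part of the patch] The generated context structures above deliberately avoid C bitfields, whose layout is compiler- and endian-dependent, and instead expose every sub-word field through a VALUE/SHIFT macro pair inside explicitly endian-swapped struct halves. A minimal sketch of the usual read/modify/write idiom for such a pair, using the SACK_ENABLED macros defined above; the helper names below are illustrative assumptions, not code from this commit.

	/* Illustrative only -- not part of the commit. */
	static inline u16 xstorm_tcp_get_sack_enabled(u16 tcp_params)
	{
		return (tcp_params & XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED) >>
		       XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT;
	}

	static inline u16 xstorm_tcp_set_sack_enabled(u16 tcp_params, u16 val)
	{
		tcp_params &= ~XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED;
		tcp_params |= (val << XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT) &
			      XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED;
		return tcp_params;
	}
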
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d8b09efdcb52..8aaf98bdd4f7 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.0.1"
-#define CNIC_MODULE_RELDATE	"Oct 01, 2009"
+#define CNIC_MODULE_VERSION	"2.1.0"
+#define CNIC_MODULE_RELDATE	"Oct 10, 2009"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -81,6 +81,8 @@ struct kcqe {
 #define DRV_CTL_CTX_WR_CMD		0x103
 #define DRV_CTL_CTXTBL_WR_CMD		0x104
 #define DRV_CTL_COMPLETION_CMD		0x105
+#define DRV_CTL_START_L2_CMD		0x106
+#define DRV_CTL_STOP_L2_CMD		0x107
 
 struct cnic_ctl_completion {
 	u32	cid;
@@ -105,11 +107,17 @@ struct drv_ctl_io {
 	dma_addr_t	dma_addr;
 };
 
+struct drv_ctl_l2_ring {
+	u32		client_id;
+	u32		cid;
+};
+
 struct drv_ctl_info {
 	int	cmd;
 	union {
 		struct drv_ctl_completion comp;
 		struct drv_ctl_io io;
+		struct drv_ctl_l2_ring ring;
 		char bytes[MAX_DRV_CTL_DATA];
 	} data;
 };
@@ -143,6 +151,7 @@ struct cnic_eth_dev {
 	u32		max_kwqe_pending;
 	struct pci_dev	*pdev;
 	void __iomem	*io_base;
+	void __iomem	*io_base2;
 
 	u32		ctx_tbl_offset;
 	u32		ctx_tbl_len;
@@ -298,5 +307,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
 extern int cnic_unregister_driver(int ulp_type);
 
 extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
+extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
 
 #endif
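[Editor's note, not part of the patch] The new DRV_CTL_START_L2_CMD/DRV_CTL_STOP_L2_CMD opcodes and struct drv_ctl_l2_ring let the cnic layer ask the underlying Ethernet driver to bring an L2 ring up or down. A hedged sketch of how a caller might describe such a request with the structures added above; the function name and the use of an ethdev->drv_ctl callback are assumptions for illustration only.

	/* Illustrative only -- not part of the commit. */
	static void example_start_l2_ring(struct cnic_eth_dev *ethdev,
					  struct net_device *netdev,
					  u32 client_id, u32 cid)
	{
		struct drv_ctl_info info;

		memset(&info, 0, sizeof(info));
		info.cmd = DRV_CTL_START_L2_CMD;	/* new opcode, 0x106 */
		info.data.ring.client_id = client_id;
		info.data.ring.cid = cid;

		/* assumed: handed to the net driver via its control hook */
		ethdev->drv_ctl(netdev, &info);
	}
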
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 61f9da2b4943..678222389407 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -380,9 +380,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 		return NULL;
 	}
 
-	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
+	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
 	if (likely(skb)) {
-		skb_reserve(skb, 2);
 		skb_put(desc->skb, desc->datalen);
 		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
 		desc->skb->ip_summed = CHECKSUM_NONE;
@@ -991,12 +990,11 @@ static int cpmac_open(struct net_device *dev)
 
 	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
 	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
-		skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
 		if (unlikely(!skb)) {
 			res = -ENOMEM;
 			goto fail_desc;
 		}
-		skb_reserve(skb, 2);
 		desc->skb = skb;
 		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
 						    CPMAC_SKB_SIZE,
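[Editor's note, not part of the patch] This hunk, like several later ones in the same diff (dl2k, e100, e1000), replaces an open-coded allocate-then-reserve pair with the netdev_alloc_skb_ip_align() helper. The two forms below should be equivalent; NET_IP_ALIGN is typically 2, which leaves the IP header 4-byte aligned after the 14-byte Ethernet header. A minimal sketch with illustrative function names, not code from this commit.

	/* Illustrative only -- not part of the commit. */
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *rx_alloc_old_style(struct net_device *dev,
						  unsigned int len)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

		if (skb)
			skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
		return skb;
	}

	static struct sk_buff *rx_alloc_new_style(struct net_device *dev,
						  unsigned int len)
	{
		return netdev_alloc_skb_ip_align(dev, len);
	}
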
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 0c54219960e2..af9321617ce4 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1323,7 +1323,7 @@ net_open(struct net_device *dev)
 		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
 #endif
 		write_irq(dev, lp->chip_type, dev->irq);
-		ret = request_irq(dev->irq, &net_interrupt, 0, dev->name, dev);
+		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
 		if (ret) {
 			if (net_debug)
 				printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 2b1aea6aa558..3e8618b4efbc 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -48,12 +48,27 @@
 struct vlan_group;
 struct adapter;
 struct sge_qset;
+struct port_info;
 
 enum {			/* rx_offload flags */
 	T3_RX_CSUM	= 1 << 0,
 	T3_LRO		= 1 << 1,
 };
 
+enum mac_idx_types {
+	LAN_MAC_IDX	= 0,
+	SAN_MAC_IDX,
+
+	MAX_MAC_IDX
+};
+
+struct iscsi_config {
+	__u8	mac_addr[ETH_ALEN];
+	__u32	flags;
+	int (*send)(struct port_info *pi, struct sk_buff **skb);
+	int (*recv)(struct port_info *pi, struct sk_buff *skb);
+};
+
 struct port_info {
 	struct adapter *adapter;
 	struct vlan_group *vlan_grp;
@@ -68,6 +83,7 @@ struct port_info {
 	struct net_device_stats netstats;
 	int activity;
 	__be32 iscsi_ipv4addr;
+	struct iscsi_config iscsic;
 
 	int link_fault; /* link fault was detected */
 };
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 1b2c305fb82b..6ff356d4c7ab 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -125,11 +125,9 @@ enum {				/* adapter interrupt-maintained statistics */
 	IRQ_NUM_STATS		/* keep last */
 };
 
-enum {
-	TP_VERSION_MAJOR	= 1,
-	TP_VERSION_MINOR	= 1,
-	TP_VERSION_MICRO	= 0
-};
+#define TP_VERSION_MAJOR	1
+#define TP_VERSION_MINOR	1
+#define TP_VERSION_MICRO	0
 
 #define S_TP_VERSION_MAJOR		16
 #define M_TP_VERSION_MAJOR		0xFF
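[Editor's note, not part of the patch] Turning the TP_VERSION_* enum into #defines is what enables the firmware-name cleanup in the cxgb3_main.c hunks below: the preprocessor can only stringify macros, so __stringify(TP_VERSION_MAJOR) applied to an enumerator would have produced the literal text "TP_VERSION_MAJOR" rather than "1". A short sketch restating the TPSRAM_VERSION macro added below, assuming the values defined above.

	/* Illustrative only -- not part of the commit. */
	#include <linux/stringify.h>

	#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."	\
		__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)

	/*
	 * With the #defines above, TPSRAM_VERSION expands to the string
	 * literal "1.1.0" at compile time, so the firmware file name can be
	 * a constant handed to request_firmware() and recorded with
	 * MODULE_FIRMWARE() -- no snprintf() into a temporary buffer needed.
	 */
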
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 34e776c5f06b..cef3f882e2b6 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -44,6 +44,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/firmware.h>
 #include <linux/log2.h>
+#include <linux/stringify.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
@@ -344,8 +345,10 @@ static void link_start(struct net_device *dev)
 
 	init_rx_mode(&rm, dev, dev->mc_list);
 	t3_mac_reset(mac);
+	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
 	t3_mac_set_mtu(mac, dev->mtu);
-	t3_mac_set_address(mac, 0, dev->dev_addr);
+	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
 	t3_mac_set_rx_mode(mac, &rm);
 	t3_link_start(&pi->phy, mac, &pi->link_config);
 	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
@@ -903,6 +906,7 @@ static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
 static int write_smt_entry(struct adapter *adapter, int idx)
 {
 	struct cpl_smt_write_req *req;
+	struct port_info *pi = netdev_priv(adapter->port[idx]);
 	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 
 	if (!skb)
@@ -913,8 +917,8 @@ static int write_smt_entry(struct adapter *adapter, int idx)
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
 	req->iff = idx;
-	memset(req->src_mac1, 0, sizeof(req->src_mac1));
 	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
+	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
 	skb->priority = 1;
 	offload_tx(&adapter->tdev, skb);
 	return 0;
@@ -989,11 +993,21 @@ static int bind_qsets(struct adapter *adap)
 	return err;
 }
 
-#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
-#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
+#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
+	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
+#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
+#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
+	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
+#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
 #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
 #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
 #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
+MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
+MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
+MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
 
 static inline const char *get_edc_fw_name(int edc_idx)
 {
@@ -1064,16 +1078,13 @@ int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
 static int upgrade_fw(struct adapter *adap)
 {
 	int ret;
-	char buf[64];
 	const struct firmware *fw;
 	struct device *dev = &adap->pdev->dev;
 
-	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
-		 FW_VERSION_MINOR, FW_VERSION_MICRO);
-	ret = request_firmware(&fw, buf, dev);
+	ret = request_firmware(&fw, FW_FNAME, dev);
 	if (ret < 0) {
 		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
-			buf);
+			FW_FNAME);
 		return ret;
 	}
 	ret = t3_load_fw(adap, fw->data, fw->size);
@@ -1117,8 +1128,7 @@ static int update_tpsram(struct adapter *adap)
 	if (!rev)
 		return 0;
 
-	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
-		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
 
 	ret = request_firmware(&tpsram, buf, dev);
 	if (ret < 0) {
@@ -2107,19 +2117,19 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		if (t.qset_idx >= SGE_QSETS)
 			return -EINVAL;
 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
-			!in_range(t.cong_thres, 0, 255) ||
-			!in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
-				MAX_TXQ_ENTRIES) ||
-			!in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
-				MAX_TXQ_ENTRIES) ||
-			!in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
-				MAX_CTRL_TXQ_ENTRIES) ||
-			!in_range(t.fl_size[0], MIN_FL_ENTRIES,
-				MAX_RX_BUFFERS)
-			|| !in_range(t.fl_size[1], MIN_FL_ENTRIES,
-					MAX_RX_JUMBO_BUFFERS)
-			|| !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
-					MAX_RSPQ_ENTRIES))
+		    !in_range(t.cong_thres, 0, 255) ||
+		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+			      MAX_TXQ_ENTRIES) ||
+		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+			      MAX_TXQ_ENTRIES) ||
+		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+			      MAX_CTRL_TXQ_ENTRIES) ||
+		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
+			      MAX_RX_BUFFERS) ||
+		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
+			      MAX_RX_JUMBO_BUFFERS) ||
+		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
+			      MAX_RSPQ_ENTRIES))
 			return -EINVAL;
 
 		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
@@ -2516,7 +2526,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 		return -EINVAL;
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
+	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
 	if (offload_running(adapter))
 		write_smt_entry(adapter, pi->port_id);
 	return 0;
@@ -2654,7 +2664,7 @@ static void check_t3b2_mac(struct adapter *adapter)
 			struct cmac *mac = &p->mac;
 
 			t3_mac_set_mtu(mac, dev->mtu);
-			t3_mac_set_address(mac, 0, dev->dev_addr);
+			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
 			cxgb_set_rxmode(dev);
 			t3_link_start(&p->phy, mac, &p->link_config);
 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
@@ -3112,6 +3122,14 @@ static const struct net_device_ops cxgb_netdev_ops = {
 #endif
 };
 
+static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
+	pi->iscsic.mac_addr[3] |= 0x80;
+}
+
 static int __devinit init_one(struct pci_dev *pdev,
 			      const struct pci_device_id *ent)
 {
@@ -3270,6 +3288,9 @@ static int __devinit init_one(struct pci_dev *pdev,
 		goto out_free_dev;
 	}
 
+	for_each_port(adapter, i)
+		cxgb3_init_iscsi_mac(adapter->port[i]);
+
 	/* Driver's ready. Reflect it on LEDs */
 	t3_led_ready(adapter);
 
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 6366061712f4..49f3de79118c 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1260,7 +1260,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
 			q->restarts++;
-			netif_tx_wake_queue(txq);
+			netif_tx_start_queue(txq);
 		}
 	}
 
@@ -1946,10 +1946,9 @@ static void restart_tx(struct sge_qset *qs)
  *	Check if the ARP request is probing the private IP address
  *	dedicated to iSCSI, generate an ARP reply if so.
  */
-static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
+static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	struct port_info *pi;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	unsigned char *sha;
@@ -1972,12 +1971,11 @@ static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
 	arp_ptr += dev->addr_len;
 	memcpy(&tip, arp_ptr, sizeof(tip));
 
-	pi = netdev_priv(dev);
 	if (tip != pi->iscsi_ipv4addr)
 		return;
 
 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
-		 dev->dev_addr, sha);
+		 pi->iscsic.mac_addr, sha);
 
 }
 
@@ -1986,6 +1984,19 @@ static inline int is_arp(struct sk_buff *skb)
 	return skb->protocol == htons(ETH_P_ARP);
 }
 
+static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
+					struct sk_buff *skb)
+{
+	if (is_arp(skb)) {
+		cxgb3_arp_process(pi, skb);
+		return;
+	}
+
+	if (pi->iscsic.recv)
+		pi->iscsic.recv(pi, skb);
+
+}
+
 /**
  *	rx_eth - process an ingress ethernet packet
  *	@adap: the adapter
@@ -2024,13 +2035,12 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 				vlan_gro_receive(&qs->napi, grp,
 						 ntohs(p->vlan), skb);
 			else {
-				if (unlikely(pi->iscsi_ipv4addr &&
-				    is_arp(skb))) {
+				if (unlikely(pi->iscsic.flags)) {
 					unsigned short vtag = ntohs(p->vlan) &
 								VLAN_VID_MASK;
 					skb->dev = vlan_group_get_device(grp,
 									 vtag);
-					cxgb3_arp_process(adap, skb);
+					cxgb3_process_iscsi_prov_pack(pi, skb);
 				}
 				__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
 					  	  rq->polling);
@@ -2041,8 +2051,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 		if (lro)
 			napi_gro_receive(&qs->napi, skb);
 		else {
-			if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
-				cxgb3_arp_process(adap, skb);
+			if (unlikely(pi->iscsic.flags))
+				cxgb3_process_iscsi_prov_pack(pi, skb);
 			netif_receive_skb(skb);
 		}
 	} else
@@ -2125,6 +2135,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	if (!complete)
 		return;
 
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	cpl = qs->lro_va;
 
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index e3478314c002..8edac8915ea8 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2803,11 +2803,33 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static
+int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	if (netif_running(dev))
+		emac_dev_stop(dev);
+
+	clk_disable(emac_clk);
+
+	return 0;
+}
+
+static int davinci_emac_resume(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	clk_enable(emac_clk);
+
+	if (netif_running(dev))
+		emac_dev_open(dev);
+
+	return 0;
+}
+
 /**
  * davinci_emac_driver: EMAC platform driver structure
- *
- * We implement only probe and remove functions - suspend/resume and
- * others not supported by this module
  */
 static struct platform_driver davinci_emac_driver = {
 	.driver = {
@@ -2816,6 +2838,8 @@ static struct platform_driver davinci_emac_driver = {
 	},
 	.probe = davinci_emac_probe,
 	.remove = __devexit_p(davinci_emac_remove),
+	.suspend = davinci_emac_suspend,
+	.resume = davinci_emac_resume,
 };
 
 /**
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index a31696a3928e..be9590253aa1 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -801,14 +801,14 @@ static int lance_open(struct net_device *dev)
 	netif_start_queue(dev);
 
 	/* Associate IRQ with lance_interrupt */
-	if (request_irq(dev->irq, &lance_interrupt, 0, "lance", dev)) {
+	if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
 		printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
 	if (lp->dma_irq >= 0) {
 		unsigned long flags;
 
-		if (request_irq(lp->dma_irq, &lance_dma_merr_int, 0,
+		if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
 				"lance error", dev)) {
 			free_irq(dev->irq, dev);
 			printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 7a3bdac84abe..0c1f491d20bf 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -849,7 +849,7 @@ static int depca_open(struct net_device *dev)
 
 	depca_dbg_open(dev);
 
-	if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, dev)) {
+	if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
 		printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
 		status = -EAGAIN;
 	} else {
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a907f134..2a8b6a7c0b87 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -163,8 +163,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
 			    strcmp (media[card_idx], "4") == 0) {
 				np->speed = 100;
 				np->full_duplex = 1;
-			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
-				   || strcmp (media[card_idx], "3") == 0) {
+			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+				   strcmp (media[card_idx], "3") == 0) {
 				np->speed = 100;
 				np->full_duplex = 0;
 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
@@ -411,7 +411,7 @@ rio_open (struct net_device *dev)
 	int i;
 	u16 macctrl;
 
-	i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
@@ -505,7 +505,8 @@ rio_timer (unsigned long data)
 			entry = np->old_rx % RX_RING_SIZE;
 			/* Dropped packets don't need to re-allocate */
 			if (np->rx_skbuff[entry] == NULL) {
-				skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+				skb = netdev_alloc_skb_ip_align(dev,
+								np->rx_buf_sz);
 				if (skb == NULL) {
 					np->rx_ring[entry].fraginfo = 0;
 					printk (KERN_INFO
@@ -514,8 +515,6 @@ rio_timer (unsigned long data)
 					break;
 				}
 				np->rx_skbuff[entry] = skb;
-				/* 16 byte align the IP header */
-				skb_reserve (skb, 2);
 				np->rx_ring[entry].fraginfo =
 				    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev)
 	/* Allocate the rx buffers */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* Allocated fixed size of skbuff */
-		struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL) {
 			printk (KERN_ERR
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev)
 				dev->name);
 			break;
 		}
-		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
 		/* Rubicon now supports 40 bits of addressing space. */
 		np->rx_ring[i].fraginfo =
 		    cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev)
 						  PCI_DMA_FROMDEVICE);
 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
-			} else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
+			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
 				pci_dma_sync_single_for_cpu(np->pdev,
 							    desc_to_dma(desc),
 							    np->rx_buf_sz,
 							    PCI_DMA_FROMDEVICE);
-				/* 16 byte align the IP header */
-				skb_reserve (skb, 2);
 				skb_copy_to_linear_data (skb,
 						  np->rx_skbuff[entry]->data,
 						  pkt_len);
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev)
 		struct sk_buff *skb;
 		/* Dropped packets don't need to re-allocate */
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 			if (skb == NULL) {
 				np->rx_ring[entry].fraginfo = 0;
 				printk (KERN_INFO
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev)
 				break;
 			}
 			np->rx_skbuff[entry] = skb;
-			/* 16 byte align the IP header */
-			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 31b8bef49d2e..0cbe3c0e7c06 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -100,6 +100,7 @@ typedef struct board_info {
 
 	unsigned int	flags;
 	unsigned int	in_suspend :1;
+	unsigned int	wake_supported :1;
 	int		debug_level;
 
 	enum dm9000_type type;
@@ -116,6 +117,8 @@ typedef struct board_info {
 	struct resource *data_req;
 	struct resource *irq_res;
 
+	int		 irq_wake;
+
 	struct mutex	 addr_lock;	/* phy and eeprom access lock */
 
 	struct delayed_work phy_poll;
@@ -125,6 +128,7 @@ typedef struct board_info {
 
 	struct mii_if_info mii;
 	u32		msg_enable;
+	u32		wake_state;
 
 	int		rx_csum;
 	int		can_csum;
@@ -568,6 +572,54 @@ static int dm9000_set_eeprom(struct net_device *dev,
 	return 0;
 }
 
+static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	memset(w, 0, sizeof(struct ethtool_wolinfo));
+
+	/* note, we could probably support wake-phy too */
+	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
+	w->wolopts = dm->wake_state;
+}
+
+static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+	unsigned long flags;
+	u32 opts = w->wolopts;
+	u32 wcr = 0;
+
+	if (!dm->wake_supported)
+		return -EOPNOTSUPP;
+
+	if (opts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (opts & WAKE_MAGIC)
+		wcr |= WCR_MAGICEN;
+
+	mutex_lock(&dm->addr_lock);
+
+	spin_lock_irqsave(&dm->lock, flags);
+	iow(dm, DM9000_WCR, wcr);
+	spin_unlock_irqrestore(&dm->lock, flags);
+
+	mutex_unlock(&dm->addr_lock);
+
+	if (dm->wake_state != opts) {
+		/* change in wol state, update IRQ state */
+
+		if (!dm->wake_state)
+			set_irq_wake(dm->irq_wake, 1);
+		else if (dm->wake_state & !opts)
+			set_irq_wake(dm->irq_wake, 0);
+	}
+
+	dm->wake_state = opts;
+	return 0;
+}
+
 static const struct ethtool_ops dm9000_ethtool_ops = {
 	.get_drvinfo		= dm9000_get_drvinfo,
 	.get_settings		= dm9000_get_settings,
@@ -576,6 +628,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 	.set_msglevel		= dm9000_set_msglevel,
 	.nway_reset		= dm9000_nway_reset,
 	.get_link		= dm9000_get_link,
+	.get_wol		= dm9000_get_wol,
+	.set_wol		= dm9000_set_wol,
  	.get_eeprom_len		= dm9000_get_eeprom_len,
  	.get_eeprom		= dm9000_get_eeprom,
  	.set_eeprom		= dm9000_set_eeprom,
@@ -722,6 +776,7 @@ dm9000_init_dm9000(struct net_device *dev)
 {
 	board_info_t *db = netdev_priv(dev);
 	unsigned int imr;
+	unsigned int ncr;
 
 	dm9000_dbg(db, 1, "entering %s\n", __func__);
 
@@ -736,8 +791,15 @@ dm9000_init_dm9000(struct net_device *dev)
 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
 	iow(db, DM9000_GPR, 0);	/* Enable PHY */
 
-	if (db->flags & DM9000_PLATF_EXT_PHY)
-		iow(db, DM9000_NCR, NCR_EXT_PHY);
+	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
+
+	/* if wol is needed, then always set NCR_WAKEEN otherwise we end
+	 * up dumping the wake events if we disable this. There is already
+	 * a wake-mask in DM9000_WCR */
+	if (db->wake_supported)
+		ncr |= NCR_WAKEEN;
+
+	iow(db, DM9000_NCR, ncr);
 
 	/* Program operating register */
 	iow(db, DM9000_TCR, 0);	        /* TX Polling clear */
@@ -962,8 +1024,8 @@ dm9000_rx(struct net_device *dev)
 		}
 
 		/* Move data from DM9000 */
-		if (GoodPacket
-		    && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
+		if (GoodPacket &&
+		    ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
 			skb_reserve(skb, 2);
 			rdptr = (u8 *) skb_put(skb, RxLen - 4);
 
@@ -1045,6 +1107,41 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	board_info_t *db = netdev_priv(dev);
+	unsigned long flags;
+	unsigned nsr, wcr;
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	nsr = ior(db, DM9000_NSR);
+	wcr = ior(db, DM9000_WCR);
+
+	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
+
+	if (nsr & NSR_WAKEST) {
+		/* clear, so we can avoid */
+		iow(db, DM9000_NSR, NSR_WAKEST);
+
+		if (wcr & WCR_LINKST)
+			dev_info(db->dev, "wake by link status change\n");
+		if (wcr & WCR_SAMPLEST)
+			dev_info(db->dev, "wake by sample packet\n");
+		if (wcr & WCR_MAGICST )
+			dev_info(db->dev, "wake by magic packet\n");
+		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
+			dev_err(db->dev, "wake signalled with no reason? "
+				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
+
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+
+	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  *Used by netconsole
@@ -1078,7 +1175,7 @@ dm9000_open(struct net_device *dev)
 
 	irqflags |= IRQF_SHARED;
 
-	if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev))
+	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
 		return -EAGAIN;
 
 	/* Initialize DM9000 board */
@@ -1299,6 +1396,29 @@ dm9000_probe(struct platform_device *pdev)
 		goto out;
 	}
 
+	db->irq_wake = platform_get_irq(pdev, 1);
+	if (db->irq_wake >= 0) {
+		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
+
+		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
+				  IRQF_SHARED, dev_name(db->dev), ndev);
+		if (ret) {
+			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
+		} else {
+
+			/* test to see if irq is really wakeup capable */
+			ret = set_irq_wake(db->irq_wake, 1);
+			if (ret) {
+				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
+					db->irq_wake, ret);
+				ret = 0;
+			} else {
+				set_irq_wake(db->irq_wake, 0);
+				db->wake_supported = 1;
+			}
+		}
+	}
+
 	iosize = resource_size(db->addr_res);
 	db->addr_req = request_mem_region(db->addr_res->start, iosize,
 					  pdev->name);
@@ -1490,10 +1610,14 @@ dm9000_drv_suspend(struct device *dev)
 		db = netdev_priv(ndev);
 		db->in_suspend = 1;
 
-		if (netif_running(ndev)) {
-			netif_device_detach(ndev);
+		if (!netif_running(ndev))
+			return 0;
+
+		netif_device_detach(ndev);
+
+		/* only shutdown if not using WoL */
+		if (!db->wake_state)
 			dm9000_shutdown(ndev);
-		}
 	}
 	return 0;
 }
@@ -1506,10 +1630,13 @@ dm9000_drv_resume(struct device *dev)
 	board_info_t *db = netdev_priv(ndev);
 
 	if (ndev) {
-
 		if (netif_running(ndev)) {
-			dm9000_reset(db);
-			dm9000_init_dm9000(ndev);
+			/* reset if we were not in wake mode to ensure if
+			 * the device was powered off it is in a known state */
+			if (!db->wake_state) {
+				dm9000_reset(db);
+				dm9000_init_dm9000(ndev);
+			}
 
 			netif_device_attach(ndev);
 		}
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index fb1c924d79b4..55688bd1a3ef 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -111,6 +111,13 @@
 #define RSR_CE              (1<<1)
 #define RSR_FOE             (1<<0)
 
+#define WCR_LINKEN		(1 << 5)
+#define WCR_SAMPLEEN		(1 << 4)
+#define WCR_MAGICEN		(1 << 3)
+#define WCR_LINKST		(1 << 2)
+#define WCR_SAMPLEST		(1 << 1)
+#define WCR_MAGICST		(1 << 0)
+
 #define FCTR_HWOT(ot)	(( ot & 0xf ) << 4 )
 #define FCTR_LWOT(ot)	( ot & 0xf )
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d269a68ce354..929701ca07d3 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -624,6 +624,7 @@ struct nic {
 	u16 eeprom_wc;
 	__le16 eeprom[256];
 	spinlock_t mdio_lock;
+	const struct firmware *fw;
 };
 
 static inline void e100_write_flush(struct nic *nic)
@@ -1225,9 +1226,9 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 static const struct firmware *e100_request_firmware(struct nic *nic)
 {
 	const char *fw_name;
-	const struct firmware *fw;
+	const struct firmware *fw = nic->fw;
 	u8 timer, bundle, min_size;
-	int err;
+	int err = 0;
 
 	/* do not load u-code for ICH devices */
 	if (nic->flags & ich)
@@ -1243,12 +1244,20 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
 	else /* No ucode on other devices */
 		return NULL;
 
-	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+	/* If the firmware has not previously been loaded, request a pointer
+	 * to it. If it was previously loaded, we are reinitializing the
+	 * adapter, possibly in a resume from hibernate, in which case
+	 * request_firmware() cannot be used.
+	 */
+	if (!fw)
+		err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+
 	if (err) {
 		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
 			fw_name, err);
 		return ERR_PTR(err);
 	}
+
 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
 	if (fw->size != UCODE_SIZE * 4 + 3) {
@@ -1271,7 +1280,10 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
 		release_firmware(fw);
 		return ERR_PTR(-EINVAL);
 	}
-	/* OK, firmware is validated and ready to use... */
+
+	/* OK, firmware is validated and ready to use. Save a pointer
+	 * to it in the nic */
+	nic->fw = fw;
 	return fw;
 }
 
@@ -1852,11 +1864,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
+	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
 		return -ENOMEM;
 
-	/* Align, init, and map the RFD. */
-	skb_reserve(rx->skb, NET_IP_ALIGN);
+	/* Init, and map the RFD. */
 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 42e2b7e21c29..2a567df3ea71 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -167,6 +167,7 @@ struct e1000_buffer {
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
+	u16 mapped_as_page;
 };
 
 struct e1000_tx_ring {
@@ -302,7 +303,6 @@ struct e1000_adapter {
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
-	struct net_device_stats net_stats;
 
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 490b2b7cd3ab..13e9ece16889 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -31,14 +31,22 @@
 #include "e1000.h"
 #include <asm/uaccess.h>
 
+enum {NETDEV_STATS, E1000_STATS};
+
 struct e1000_stats {
 	char stat_string[ETH_GSTRING_LEN];
+	int type;
 	int sizeof_stat;
 	int stat_offset;
 };
 
-#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
-		      offsetof(struct e1000_adapter, m)
+#define E1000_STAT(m)		E1000_STATS, \
+				sizeof(((struct e1000_adapter *)0)->m), \
+		      		offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
+				sizeof(((struct net_device *)0)->m), \
+				offsetof(struct net_device, m)
+
 static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_packets", E1000_STAT(stats.gprc) },
 	{ "tx_packets", E1000_STAT(stats.gptc) },
@@ -50,19 +58,19 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "tx_multicast", E1000_STAT(stats.mptc) },
 	{ "rx_errors", E1000_STAT(stats.rxerrc) },
 	{ "tx_errors", E1000_STAT(stats.txerrc) },
-	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
 	{ "multicast", E1000_STAT(stats.mprc) },
 	{ "collisions", E1000_STAT(stats.colc) },
 	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
-	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
 	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
-	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
 	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
 	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
 	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
 	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
-	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
-	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
 	{ "tx_window_errors", E1000_STAT(stats.latecol) },
 	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
 	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -861,10 +869,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 
 	/* NOTE: we don't test MSI interrupts here, yet */
 	/* Hook up test interrupt handler just for this test */
-	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
 	                 netdev))
 		shared_int = false;
-	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
 	         netdev->name, netdev)) {
 		*data = 1;
 		return -1;
@@ -1830,10 +1838,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
+	char *p = NULL;
 
 	e1000_update_stats(adapter);
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+		switch (e1000_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *) netdev +
+					e1000_gstrings_stats[i].stat_offset;
+			break;
+		case E1000_STATS:
+			p = (char *) adapter +
+					e1000_gstrings_stats[i].stat_offset;
+			break;
+		}
+
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
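[Editor's note, not part of the patch] The ethtool statistics table now tags each entry with a source (NETDEV_STATS or E1000_STATS) so the same offset-based lookup can pull counters from either struct net_device or struct e1000_adapter. A small self-contained sketch of that base-pointer-plus-offsetof technique; the structure and field names below are made up for illustration.

	/* Illustrative only -- not part of the commit. */
	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct dev_stats { uint64_t rx_dropped; };
	struct drv_stats { uint64_t rx_crc_errors; };

	enum { DEV_STATS, DRV_STATS };

	struct stat_entry {
		const char *name;
		int type;		/* which base pointer to use */
		size_t offset;		/* offset within that struct */
	};

	static const struct stat_entry table[] = {
		{ "rx_dropped",    DEV_STATS, offsetof(struct dev_stats, rx_dropped) },
		{ "rx_crc_errors", DRV_STATS, offsetof(struct drv_stats, rx_crc_errors) },
	};

	int main(void)
	{
		struct dev_stats dev = { .rx_dropped = 3 };
		struct drv_stats drv = { .rx_crc_errors = 7 };
		size_t i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			const char *base = table[i].type == DEV_STATS ?
					   (const char *)&dev : (const char *)&drv;
			uint64_t val = *(const uint64_t *)(base + table[i].offset);

			printf("%s = %llu\n", table[i].name,
			       (unsigned long long)val);
		}
		return 0;
	}
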
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index bcd192ca47b0..7e855f9bbd97 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1839,10 +1839,17 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 					     struct e1000_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev, buffer_info->dma,
+				       buffer_info->length, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev,	buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
@@ -2683,22 +2690,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			unsigned int mss)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb_headlen(skb);
-	unsigned int offset, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-	offset = 0;
-
 	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, max_per_txd);
@@ -2735,7 +2734,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		buffer_info->length = size;
 		/* set time_stamp *before* dma to help avoid a possible race */
 		buffer_info->time_stamp = jiffies;
-		buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+		buffer_info->mapped_as_page = false;
+		buffer_info->dma = pci_map_single(pdev,	skb->data + offset,
+						  size,	PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
 		buffer_info->next_to_watch = i;
 
 		len -= size;
@@ -2753,7 +2756,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = 0;
+		offset = frag->page_offset;
 
 		while (len) {
 			i++;
@@ -2777,7 +2780,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
-			buffer_info->dma = map[f] + offset;
+			buffer_info->mapped_as_page = true;
+			buffer_info->dma = pci_map_page(pdev, frag->page,
+							offset,	size,
+							PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+				goto dma_error;
 			buffer_info->next_to_watch = i;
 
 			len -= size;
@@ -2790,6 +2798,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+	buffer_info->dma = 0;
+	count--;
+
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	return 0;
 }
 
 static void e1000_tx_queue(struct e1000_adapter *adapter,
@@ -3101,10 +3125,8 @@ static void e1000_reset_task(struct work_struct *work)
 
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-
 	/* only return the current stats */
-	return &adapter->net_stats;
+	return &netdev->stats;
 }
 
 /**
@@ -3196,6 +3218,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
 void e1000_update_stats(struct e1000_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long flags;
@@ -3288,32 +3311,32 @@ void e1000_update_stats(struct e1000_adapter *adapter)
 	}
 
 	/* Fill out the OS statistics structure */
-	adapter->net_stats.multicast = adapter->stats.mprc;
-	adapter->net_stats.collisions = adapter->stats.colc;
+	netdev->stats.multicast = adapter->stats.mprc;
+	netdev->stats.collisions = adapter->stats.colc;
 
 	/* Rx Errors */
 
 	/* RLEC on some newer hardware can be incorrect so build
 	* our own version based on RUC and ROC */
-	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+	netdev->stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
 		adapter->stats.cexterr;
 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
-	adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
-	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
+	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+	netdev->stats.rx_missed_errors = adapter->stats.mpc;
 
 	/* Tx Errors */
 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
-	adapter->net_stats.tx_errors = adapter->stats.txerrc;
-	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	netdev->stats.tx_errors = adapter->stats.txerrc;
+	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+	netdev->stats.tx_window_errors = adapter->stats.latecol;
+	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
 	if (hw->bad_tx_carr_stats_fd &&
 	    adapter->link_duplex == FULL_DUPLEX) {
-		adapter->net_stats.tx_carrier_errors = 0;
+		netdev->stats.tx_carrier_errors = 0;
 		adapter->stats.tncrs = 0;
 	}
 
@@ -3484,8 +3507,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		adapter->detect_tx_hung = false;
 		if (tx_ring->buffer_info[eop].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
-		               (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+		               (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
 			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
@@ -3514,8 +3537,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	}
 	adapter->total_tx_bytes += total_tx_bytes;
 	adapter->total_tx_packets += total_tx_packets;
-	adapter->net_stats.tx_bytes += total_tx_bytes;
-	adapter->net_stats.tx_packets += total_tx_packets;
+	netdev->stats.tx_bytes += total_tx_bytes;
+	netdev->stats.tx_packets += total_tx_packets;
 	return (count < tx_ring->count);
 }
 
@@ -3767,8 +3790,8 @@ next_desc:
 
 	adapter->total_rx_packets += total_rx_packets;
 	adapter->total_rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -3867,9 +3890,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * of reassembly being done in the stack */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -3916,8 +3938,8 @@ next_desc:
 
 	adapter->total_rx_packets += total_rx_packets;
 	adapter->total_rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -3938,9 +3960,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-	                     16 /*for skb_reserve */ -
-	                     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -3952,7 +3972,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -3965,7 +3985,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
 					     "at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb(netdev, bufsz);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -3983,12 +4003,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			/* Use new allocation */
 			dev_kfree_skb(oldskb);
 		}
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 check_page:
@@ -4045,7 +4059,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4057,7 +4071,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -4070,7 +4084,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
 					     "at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb(netdev, bufsz);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -4089,12 +4103,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			/* Use new allocation */
 			dev_kfree_skb(oldskb);
 		}
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
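
The Rx allocation hunks above replace the open-coded netdev_alloc_skb() plus skb_reserve(skb, NET_IP_ALIGN) pair with netdev_alloc_skb_ip_align(), which performs the same small reservation internally, so the explicit NET_IP_ALIGN padding in bufsz and the trailing skb_reserve() calls can go. The stand-alone C sketch below is only a userspace model of the alignment arithmetic (the buffer address and macro values are illustrative assumptions, not driver code); it shows why reserving NET_IP_ALIGN bytes leaves the IP header 16-byte aligned once the 14-byte Ethernet header is stripped.

#include <stdio.h>

#define NET_IP_ALIGN 2   /* assumed typical value; some architectures use 0 */
#define ETH_HLEN     14  /* Ethernet (MAC) header length */

int main(void)
{
	/* Pretend the allocator handed back a 16-byte-aligned buffer. */
	unsigned long buf = 0x1000;

	unsigned long data   = buf + NET_IP_ALIGN; /* what skb_reserve() does */
	unsigned long ip_hdr = data + ETH_HLEN;    /* payload after MAC header */

	printf("data   mod 16 = %lu\n", data % 16);   /* 2 */
	printf("ip_hdr mod 16 = %lu\n", ip_hdr % 16); /* 0: IP header aligned */
	return 0;
}
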
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index d1e0563a67df..c1a42cfc80ba 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@
  * 82583V Gigabit Network Connection
  */
 
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"
 
 #define ID_LED_RESERVED_F746 0xF746
@@ -69,15 +65,15 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
 static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
 static s32 e1000_setup_link_82571(struct e1000_hw *hw);
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
 static s32 e1000_led_on_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
  *  @hw: pointer to the HW structure
- *
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 {
@@ -93,6 +89,9 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 	phy->autoneg_mask		 = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 	phy->reset_delay_us		 = 100;
 
+	phy->ops.power_up		 = e1000_power_up_phy_copper;
+	phy->ops.power_down		 = e1000_power_down_phy_copper_82571;
+
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -140,8 +139,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 /**
  *  e1000_init_nvm_params_82571 - Init NVM func ptrs.
  *  @hw: pointer to the HW structure
- *
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 {
@@ -205,8 +202,6 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 /**
  *  e1000_init_mac_params_82571 - Init MAC func ptrs.
  *  @hw: pointer to the HW structure
- *
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 {
@@ -240,7 +235,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+	                ? true : false;
 
 	/* check for link */
 	switch (hw->phy.media_type) {
@@ -313,7 +309,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 			 * indicates that the bootagent or EFI code has
 			 * improperly left this bit enabled
 			 */
-			hw_dbg(hw, "Please update your 82571 Bootagent\n");
+			e_dbg("Please update your 82571 Bootagent\n");
 		}
 		ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
 	}
@@ -487,7 +483,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 	}
 
 	if (i == sw_timeout) {
-		hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
 		hw->dev_spec.e82571.smb_counter++;
 	}
 	/* Get the FW semaphore. */
@@ -505,7 +501,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 	if (i == fw_timeout) {
 		/* Release semaphores */
 		e1000_put_hw_semaphore_82571(hw);
-		hw_dbg(hw, "Driver can't access the NVM\n");
+		e_dbg("Driver can't access the NVM\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -702,8 +698,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 				      u16 words, u16 *data)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
-	u32 i;
-	u32 eewr = 0;
+	u32 i, eewr = 0;
 	s32 ret_val = 0;
 
 	/*
@@ -712,7 +707,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+		e_dbg("nvm parameter(s) out of bounds\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -753,7 +748,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 		timeout--;
 	}
 	if (!timeout) {
-		hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+		e_dbg("MNG configuration cycle has not completed.\n");
 		return -E1000_ERR_RESET;
 	}
 
@@ -763,7 +758,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 /**
  *  e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
  *  @hw: pointer to the HW structure
- *  @active: TRUE to enable LPLU, FALSE to disable
+ *  @active: true to enable LPLU, false to disable
  *
  *  Sets the LPLU D0 state according to the active flag.  When activating LPLU
  *  this function also disables smart speed and vice versa.  LPLU will not be
@@ -834,15 +829,11 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  *  e1000_reset_hw_82571 - Reset hardware
  *  @hw: pointer to the HW structure
  *
- *  This resets the hardware into a known state.  This is a
- *  function pointer entry point called by the api module.
+ *  This resets the hardware into a known state.
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl;
-	u32 extcnf_ctrl;
-	u32 ctrl_ext;
-	u32 icr;
+	u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
 	s32 ret_val;
 	u16 i = 0;
 
@@ -852,9 +843,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
 	if (ret_val)
-		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+		e_dbg("PCI-E Master disable polling has failed.\n");
 
-	hw_dbg(hw, "Masking off all interrupts\n");
+	e_dbg("Masking off all interrupts\n");
 	ew32(IMC, 0xffffffff);
 
 	ew32(RCTL, 0);
@@ -893,7 +884,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 
 	ctrl = er32(CTRL);
 
-	hw_dbg(hw, "Issuing a global reset to MAC\n");
+	e_dbg("Issuing a global reset to MAC\n");
 	ew32(CTRL, ctrl | E1000_CTRL_RST);
 
 	if (hw->nvm.type == e1000_nvm_flash_hw) {
@@ -951,21 +942,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	struct e1000_mac_info *mac = &hw->mac;
 	u32 reg_data;
 	s32 ret_val;
-	u16 i;
-	u16 rar_count = mac->rar_entry_count;
+	u16 i, rar_count = mac->rar_entry_count;
 
 	e1000_initialize_hw_bits_82571(hw);
 
 	/* Initialize identification LED */
 	ret_val = e1000e_id_led_init(hw);
-	if (ret_val) {
-		hw_dbg(hw, "Error initializing identification LED\n");
-		return ret_val;
-	}
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
 
 	/* Disabling VLAN filtering */
-	hw_dbg(hw, "Initializing the IEEE VLAN\n");
-	e1000e_clear_vfta(hw);
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);
 
 	/* Setup the receive address. */
 	/*
@@ -978,7 +967,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	e1000e_init_rx_addrs(hw, rar_count);
 
 	/* Zero out the Multicast HASH table */
-	hw_dbg(hw, "Zeroing the MTA\n");
+	e_dbg("Zeroing the MTA\n");
 	for (i = 0; i < mac->mta_reg_count; i++)
 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
 
@@ -1125,6 +1114,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 		reg |= (1 << 22);
 		ew32(GCR, reg);
 
+		/*
+		 * Apply the workaround for a hardware erratum documented in
+		 * the errata sheets.  Without the fix, some PCIe completions
+		 * are unreliable or error prone, particularly when ASPM is
+		 * enabled, and the resulting completion errors can cause
+		 * Tx timeouts.
+		 */
 		reg = er32(GCR2);
 		reg |= 1;
 		ew32(GCR2, reg);
@@ -1137,13 +1133,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 }
 
 /**
- *  e1000e_clear_vfta - Clear VLAN filter table
+ *  e1000_clear_vfta_82571 - Clear VLAN filter table
  *  @hw: pointer to the HW structure
  *
  *  Clears the register array which contains the VLAN filter table by
  *  setting all the values to 0.
  **/
-void e1000e_clear_vfta(struct e1000_hw *hw)
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
 {
 	u32 offset;
 	u32 vfta_value = 0;
@@ -1360,8 +1356,20 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
  *  e1000_check_for_serdes_link_82571 - Check for link (Serdes)
  *  @hw: pointer to the HW structure
  *
- *  Checks for link up on the hardware.  If link is not up and we have
- *  a signal, then we need to force link up.
+ *  Reports the link state as up or down.
+ *
+ *  If autonegotiation is supported by the link partner, the link state is
+ *  determined by the result of autonegotiation. This is the most likely case.
+ *  If autonegotiation is not supported by the link partner, and the link
+ *  has a valid signal, force the link up.
+ *
+ *  The link state is represented internally here by 4 states:
+ *
+ *  1) down
+ *  2) autoneg_progress
+ *  3) autoneg_complete (the link successfully autonegotiated)
+ *  4) forced_up (the link has been forced up; it did not autonegotiate)
+ *
  **/
 static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 {
@@ -1387,7 +1395,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 				 */
 				mac->serdes_link_state =
 				    e1000_serdes_link_autoneg_progress;
-				hw_dbg(hw, "AN_UP     -> AN_PROG\n");
+				mac->serdes_has_link = false;
+				e_dbg("AN_UP     -> AN_PROG\n");
 			}
 		break;
 
@@ -1401,79 +1410,86 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 			if (rxcw & E1000_RXCW_C) {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
-				ew32(CTRL,
-				    (ctrl & ~E1000_CTRL_SLU));
+				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 				mac->serdes_link_state =
 				    e1000_serdes_link_autoneg_progress;
-				hw_dbg(hw, "FORCED_UP -> AN_PROG\n");
+				mac->serdes_has_link = false;
+				e_dbg("FORCED_UP -> AN_PROG\n");
 			}
 			break;
 
 		case e1000_serdes_link_autoneg_progress:
-			/*
-			 * If the LU bit is set in the STATUS register,
-			 * autoneg has completed sucessfully. If not,
-			 * try foring the link because the far end may be
-			 * available but not capable of autonegotiation.
-			 */
-			if (status & E1000_STATUS_LU)  {
-				mac->serdes_link_state =
-				    e1000_serdes_link_autoneg_complete;
-				hw_dbg(hw, "AN_PROG   -> AN_UP\n");
+			if (rxcw & E1000_RXCW_C) {
+				/*
+				 * We received /C/ ordered sets, meaning the
+				 * link partner has autonegotiated, and we can
+				 * trust the Link Up (LU) status bit.
+				 */
+				if (status & E1000_STATUS_LU) {
+					mac->serdes_link_state =
+					    e1000_serdes_link_autoneg_complete;
+					e_dbg("AN_PROG   -> AN_UP\n");
+					mac->serdes_has_link = true;
+				} else {
+					/* Autoneg completed, but failed. */
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("AN_PROG   -> DOWN\n");
+				}
 			} else {
 				/*
-				 * Disable autoneg, force link up and
-				 * full duplex, and change state to forced
+				 * The link partner did not autoneg.
+				 * Force link up and full duplex, and change
+				 * state to forced.
 				 */
-				ew32(TXCW,
-				    (mac->txcw & ~E1000_TXCW_ANE));
+				ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
 				ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
 				ew32(CTRL, ctrl);
 
 				/* Configure Flow Control after link up. */
-				ret_val =
-				    e1000e_config_fc_after_link_up(hw);
+				ret_val = e1000e_config_fc_after_link_up(hw);
 				if (ret_val) {
-					hw_dbg(hw, "Error config flow control\n");
+					e_dbg("Error config flow control\n");
 					break;
 				}
 				mac->serdes_link_state =
 				    e1000_serdes_link_forced_up;
-				hw_dbg(hw, "AN_PROG   -> FORCED_UP\n");
+				mac->serdes_has_link = true;
+				e_dbg("AN_PROG   -> FORCED_UP\n");
 			}
-			mac->serdes_has_link = true;
 			break;
 
 		case e1000_serdes_link_down:
 		default:
-			/* The link was down but the receiver has now gained
+			/*
+			 * The link was down but the receiver has now gained
 			 * valid sync, so lets see if we can bring the link
-			 * up. */
+			 * up.
+			 */
 			ew32(TXCW, mac->txcw);
-			ew32(CTRL,
-			    (ctrl & ~E1000_CTRL_SLU));
+			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 			mac->serdes_link_state =
 			    e1000_serdes_link_autoneg_progress;
-			hw_dbg(hw, "DOWN      -> AN_PROG\n");
+			e_dbg("DOWN      -> AN_PROG\n");
 			break;
 		}
 	} else {
 		if (!(rxcw & E1000_RXCW_SYNCH)) {
 			mac->serdes_has_link = false;
 			mac->serdes_link_state = e1000_serdes_link_down;
-			hw_dbg(hw, "ANYSTATE  -> DOWN\n");
+			e_dbg("ANYSTATE  -> DOWN\n");
 		} else {
 			/*
-			 * We have sync, and can tolerate one
-			 * invalid (IV) codeword before declaring
-			 * link down, so reread to look again
+			 * We have sync, and can tolerate one invalid (IV)
+			 * codeword before declaring link down, so reread
+			 * to look again.
 			 */
 			udelay(10);
 			rxcw = er32(RXCW);
 			if (rxcw & E1000_RXCW_IV) {
 				mac->serdes_link_state = e1000_serdes_link_down;
 				mac->serdes_has_link = false;
-				hw_dbg(hw, "ANYSTATE  -> DOWN\n");
+				e_dbg("ANYSTATE  -> DOWN\n");
 			}
 		}
 	}
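
The new kernel-doc comment above describes the serdes link handling as a four-state machine (down, autoneg_progress, autoneg_complete, forced_up) driven by the /C/ ordered sets and the LU status bit. The stand-alone C sketch below is a simplified userspace model of that state machine under stated assumptions (no IV-codeword tolerance, no flow-control configuration); it is not the driver code itself.

#include <stdbool.h>
#include <stdio.h>

enum serdes_state { LINK_DOWN, AN_PROGRESS, AN_COMPLETE, FORCED_UP };

struct link_inputs {
	bool rxcw_c;    /* /C/ ordered sets seen: partner is autonegotiating */
	bool status_lu; /* Link Up bit set in the STATUS register */
	bool sync;      /* receiver has valid sync */
};

static enum serdes_state step(enum serdes_state s, struct link_inputs in)
{
	if (!in.sync)
		return LINK_DOWN;               /* lost sync: link is down */

	switch (s) {
	case LINK_DOWN:
		return AN_PROGRESS;             /* sync regained: retry autoneg */
	case AN_PROGRESS:
		if (in.rxcw_c)                  /* partner autonegotiated */
			return in.status_lu ? AN_COMPLETE : LINK_DOWN;
		return FORCED_UP;               /* partner cannot autoneg: force */
	case AN_COMPLETE:
		/* renegotiate if LU dropped or the /C/ sets stopped */
		return (in.status_lu && in.rxcw_c) ? AN_COMPLETE : AN_PROGRESS;
	case FORCED_UP:
		/* partner started autonegotiating: unforce and renegotiate */
		return in.rxcw_c ? AN_PROGRESS : FORCED_UP;
	}
	return LINK_DOWN;
}

int main(void)
{
	struct link_inputs in = { .rxcw_c = true, .status_lu = true, .sync = true };
	enum serdes_state s = LINK_DOWN;

	s = step(s, in);                        /* DOWN        -> AN_PROGRESS */
	s = step(s, in);                        /* AN_PROGRESS -> AN_COMPLETE */
	printf("link %s\n", (s == AN_COMPLETE || s == FORCED_UP) ? "up" : "down");
	return 0;
}
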
@@ -1495,7 +1511,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
 
 	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}
 
@@ -1525,7 +1541,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
 bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
 {
 	if (hw->mac.type != e1000_82571)
-		return 0;
+		return false;
 
 	return hw->dev_spec.e82571.laa_is_present;
 }
@@ -1535,7 +1551,7 @@ bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
  *  @hw: pointer to the HW structure
  *  @state: enable/disable locally administered address
  *
- *  Enable/Disable the current locally administers address state.
+ *  Enable/Disable the current locally administered address state.
  **/
 void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
 {
@@ -1609,6 +1625,28 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!(phy->ops.check_reset_block))
+		return;
+
+	/* If the management interface is not enabled, then power down */
+	if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
  *  e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
  *  @hw: pointer to the HW structure
  *
@@ -1616,44 +1654,42 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
  **/
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
 {
-	u32 temp;
-
 	e1000e_clear_hw_cntrs_base(hw);
 
-	temp = er32(PRC64);
-	temp = er32(PRC127);
-	temp = er32(PRC255);
-	temp = er32(PRC511);
-	temp = er32(PRC1023);
-	temp = er32(PRC1522);
-	temp = er32(PTC64);
-	temp = er32(PTC127);
-	temp = er32(PTC255);
-	temp = er32(PTC511);
-	temp = er32(PTC1023);
-	temp = er32(PTC1522);
-
-	temp = er32(ALGNERRC);
-	temp = er32(RXERRC);
-	temp = er32(TNCRS);
-	temp = er32(CEXTERR);
-	temp = er32(TSCTC);
-	temp = er32(TSCTFC);
-
-	temp = er32(MGTPRC);
-	temp = er32(MGTPDC);
-	temp = er32(MGTPTC);
-
-	temp = er32(IAC);
-	temp = er32(ICRXOC);
-
-	temp = er32(ICRXPTC);
-	temp = er32(ICRXATC);
-	temp = er32(ICTXPTC);
-	temp = er32(ICTXATC);
-	temp = er32(ICTXQEC);
-	temp = er32(ICTXQMTC);
-	temp = er32(ICRXDMTC);
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
 }
 
 static struct e1000_mac_operations e82571_mac_ops = {
@@ -1667,6 +1703,8 @@ static struct e1000_mac_operations e82571_mac_ops = {
 	/* .led_on: mac type dependent */
 	.led_off		= e1000e_led_off_generic,
 	.update_mc_addr_list	= e1000_update_mc_addr_list_82571,
+	.write_vfta		= e1000_write_vfta_generic,
+	.clear_vfta		= e1000_clear_vfta_82571,
 	.reset_hw		= e1000_reset_hw_82571,
 	.init_hw		= e1000_init_hw_82571,
 	.setup_link		= e1000_setup_link_82571,
@@ -1675,64 +1713,67 @@ static struct e1000_mac_operations e82571_mac_ops = {
 };
 
 static struct e1000_phy_operations e82_phy_ops_igp = {
-	.acquire_phy		= e1000_get_hw_semaphore_82571,
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_igp,
 	.check_reset_block	= e1000e_check_reset_block_generic,
-	.commit_phy		= NULL,
+	.commit			= NULL,
 	.force_speed_duplex	= e1000e_phy_force_speed_duplex_igp,
 	.get_cfg_done		= e1000_get_cfg_done_82571,
 	.get_cable_length	= e1000e_get_cable_length_igp_2,
-	.get_phy_info		= e1000e_get_phy_info_igp,
-	.read_phy_reg		= e1000e_read_phy_reg_igp,
-	.release_phy		= e1000_put_hw_semaphore_82571,
-	.reset_phy		= e1000e_phy_hw_reset_generic,
+	.get_info		= e1000e_get_phy_info_igp,
+	.read_reg		= e1000e_read_phy_reg_igp,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
 	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
-	.write_phy_reg		= e1000e_write_phy_reg_igp,
+	.write_reg		= e1000e_write_phy_reg_igp,
 	.cfg_on_link_up      	= NULL,
 };
 
 static struct e1000_phy_operations e82_phy_ops_m88 = {
-	.acquire_phy		= e1000_get_hw_semaphore_82571,
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_m88,
 	.check_reset_block	= e1000e_check_reset_block_generic,
-	.commit_phy		= e1000e_phy_sw_reset,
+	.commit			= e1000e_phy_sw_reset,
 	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
 	.get_cfg_done		= e1000e_get_cfg_done,
 	.get_cable_length	= e1000e_get_cable_length_m88,
-	.get_phy_info		= e1000e_get_phy_info_m88,
-	.read_phy_reg		= e1000e_read_phy_reg_m88,
-	.release_phy		= e1000_put_hw_semaphore_82571,
-	.reset_phy		= e1000e_phy_hw_reset_generic,
+	.get_info		= e1000e_get_phy_info_m88,
+	.read_reg		= e1000e_read_phy_reg_m88,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
 	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
-	.write_phy_reg		= e1000e_write_phy_reg_m88,
+	.write_reg		= e1000e_write_phy_reg_m88,
 	.cfg_on_link_up      	= NULL,
 };
 
 static struct e1000_phy_operations e82_phy_ops_bm = {
-	.acquire_phy		= e1000_get_hw_semaphore_82571,
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_m88,
 	.check_reset_block	= e1000e_check_reset_block_generic,
-	.commit_phy		= e1000e_phy_sw_reset,
+	.commit			= e1000e_phy_sw_reset,
 	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
 	.get_cfg_done		= e1000e_get_cfg_done,
 	.get_cable_length	= e1000e_get_cable_length_m88,
-	.get_phy_info		= e1000e_get_phy_info_m88,
-	.read_phy_reg		= e1000e_read_phy_reg_bm2,
-	.release_phy		= e1000_put_hw_semaphore_82571,
-	.reset_phy		= e1000e_phy_hw_reset_generic,
+	.get_info		= e1000e_get_phy_info_m88,
+	.read_reg		= e1000e_read_phy_reg_bm2,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
 	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
-	.write_phy_reg		= e1000e_write_phy_reg_bm2,
+	.write_reg		= e1000e_write_phy_reg_bm2,
 	.cfg_on_link_up      	= NULL,
 };
 
 static struct e1000_nvm_operations e82571_nvm_ops = {
-	.acquire_nvm		= e1000_acquire_nvm_82571,
-	.read_nvm		= e1000e_read_nvm_eerd,
-	.release_nvm		= e1000_release_nvm_82571,
-	.update_nvm		= e1000_update_nvm_checksum_82571,
+	.acquire		= e1000_acquire_nvm_82571,
+	.read			= e1000e_read_nvm_eerd,
+	.release		= e1000_release_nvm_82571,
+	.update			= e1000_update_nvm_checksum_82571,
 	.valid_led_default	= e1000_valid_led_default_82571,
-	.validate_nvm		= e1000_validate_nvm_checksum_82571,
-	.write_nvm		= e1000_write_nvm_82571,
+	.validate		= e1000_validate_nvm_checksum_82571,
+	.write			= e1000_write_nvm_82571,
 };
 
 struct e1000_info e1000_82571_info = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 1190167a8b3d..86d2809763c3 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 3e187b0e4203..cebbd9079d53 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
 #include <linux/workqueue.h>
 #include <linux/io.h>
 #include <linux/netdevice.h>
+#include <linux/pci.h>
 
 #include "hw.h"
 
@@ -47,9 +48,9 @@ struct e1000_info;
 
 #ifdef DEBUG
 #define e_dbg(format, arg...) \
-	e_printk(KERN_DEBUG , adapter, format, ## arg)
+	e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
 #else
-#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
+#define e_dbg(format, arg...) do { (void)(hw); } while (0)
 #endif
 
 #define e_err(format, arg...) \
@@ -193,12 +194,15 @@ struct e1000_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* Rx */
-		/* arrays of page information for packet split */
-		struct e1000_ps_page *ps_pages;
+		struct {
+			/* arrays of page information for packet split */
+			struct e1000_ps_page *ps_pages;
+			struct page *page;
+		};
 	};
-	struct page *page;
 };
 
 struct e1000_ring {
@@ -331,7 +335,6 @@ struct e1000_adapter {
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
-	struct net_device_stats net_stats;
 
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
@@ -366,6 +369,7 @@ struct e1000_adapter {
 	struct work_struct downshift_task;
 	struct work_struct update_phy_task;
 	struct work_struct led_blink_task;
+	struct work_struct print_hang_task;
 };
 
 struct e1000_info {
@@ -488,6 +492,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 
 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -507,7 +512,7 @@ extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
 extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
 extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
 extern s32 e1000e_setup_link(struct e1000_hw *hw);
-extern void e1000e_clear_vfta(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
 extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
 extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
 					       u8 *mc_addr_list,
@@ -523,7 +528,7 @@ extern void e1000e_config_collision_dist(struct e1000_hw *hw);
 extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
 extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
 extern s32 e1000e_blink_led(struct e1000_hw *hw);
-extern void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
 extern void e1000e_reset_adaptive(struct e1000_hw *hw);
 extern void e1000e_update_adaptive(struct e1000_hw *hw);
 
@@ -566,6 +571,8 @@ extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 			       u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
@@ -583,9 +590,15 @@ extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
 extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
 extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 
+extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
-	return hw->phy.ops.reset_phy(hw);
+	return hw->phy.ops.reset(hw);
 }
 
 static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
@@ -595,12 +608,12 @@ static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
 
 static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
 {
-	return hw->phy.ops.read_phy_reg(hw, offset, data);
+	return hw->phy.ops.read_reg(hw, offset, data);
 }
 
 static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
 {
-	return hw->phy.ops.write_phy_reg(hw, offset, data);
+	return hw->phy.ops.write_reg(hw, offset, data);
 }
 
 static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
@@ -620,27 +633,27 @@ extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
 
 static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
 {
-	return hw->nvm.ops.validate_nvm(hw);
+	return hw->nvm.ops.validate(hw);
 }
 
 static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
 {
-	return hw->nvm.ops.update_nvm(hw);
+	return hw->nvm.ops.update(hw);
 }
 
 static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-	return hw->nvm.ops.read_nvm(hw, offset, words, data);
+	return hw->nvm.ops.read(hw, offset, words, data);
 }
 
 static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-	return hw->nvm.ops.write_nvm(hw, offset, words, data);
+	return hw->nvm.ops.write(hw, offset, words, data);
 }
 
 static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
 {
-	return hw->phy.ops.get_phy_info(hw);
+	return hw->phy.ops.get_info(hw);
 }
 
 static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
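
Most of the header churn above renames the members of the PHY and NVM function-pointer tables (for example nvm.ops.read_nvm becomes nvm.ops.read) while keeping the thin static inline wrappers such as e1000_read_nvm(). The stand-alone C sketch below models that ops-table pattern with hypothetical stub types and a fake backend purely to illustrate the indirection; none of the names are the driver's real ones, only the pattern mirrors the header.

#include <stdint.h>
#include <stdio.h>

struct nvm_ops {
	int  (*acquire)(void *hw);
	int  (*read)(void *hw, uint16_t offset, uint16_t words, uint16_t *data);
	void (*release)(void *hw);
};

struct fake_hw {
	struct nvm_ops ops;
};

/* Thin wrapper: callers never care which controller's read() is bound. */
static inline int read_nvm(struct fake_hw *hw, uint16_t off, uint16_t n,
			   uint16_t *data)
{
	return hw->ops.read(hw, off, n, data);
}

static int  stub_acquire(void *hw) { (void)hw; return 0; }
static void stub_release(void *hw) { (void)hw; }

static int stub_read(void *hw, uint16_t off, uint16_t n, uint16_t *data)
{
	(void)hw;
	for (uint16_t i = 0; i < n; i++)
		data[i] = (uint16_t)(off + i);  /* stand-in for EEPROM words */
	return 0;
}

int main(void)
{
	struct fake_hw hw = { .ops = { stub_acquire, stub_read, stub_release } };
	uint16_t word;

	hw.ops.acquire(&hw);
	read_nvm(&hw, 0x10, 1, &word);
	hw.ops.release(&hw);
	printf("word 0x10 = 0x%04x\n", word);
	return 0;
}
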
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index ae5d73689353..d2a104794609 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -31,11 +31,6 @@
  * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
  */
 
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"
 
 #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL	 0x00
@@ -104,6 +99,8 @@
  */
 static const u16 e1000_gg82563_cable_length_table[] =
 	 { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_gg82563_cable_length_table)
 
 static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
 static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
@@ -117,12 +114,11 @@ static s32  e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
                                             u16 *data);
 static s32  e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
                                              u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
 
 /**
  *  e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
  *  @hw: pointer to the HW structure
- *
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 {
@@ -132,6 +128,9 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 	if (hw->phy.media_type != e1000_media_type_copper) {
 		phy->type	= e1000_phy_none;
 		return 0;
+	} else {
+		phy->ops.power_up = e1000_power_up_phy_copper;
+		phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
 	}
 
 	phy->addr		= 1;
@@ -152,8 +151,6 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 /**
  *  e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
  *  @hw: pointer to the HW structure
- *
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 {
@@ -200,8 +197,6 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 /**
  *  e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
  *  @hw: pointer to the HW structure
- *
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 {
@@ -224,7 +219,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+                        ? true : false;
 
 	/* check for link */
 	switch (hw->phy.media_type) {
@@ -272,8 +268,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
  *  e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
  *  @hw: pointer to the HW structure
  *
- *  A wrapper to acquire access rights to the correct PHY.  This is a
- *  function pointer entry point called by the api module.
+ *  A wrapper to acquire access rights to the correct PHY.
  **/
 static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
 {
@@ -287,8 +282,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
  *  e1000_release_phy_80003es2lan - Release rights to access PHY
  *  @hw: pointer to the HW structure
  *
- *  A wrapper to release access rights to the correct PHY.  This is a
- *  function pointer entry point called by the api module.
+ *  A wrapper to release access rights to the correct PHY.
  **/
 static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
 {
@@ -333,8 +327,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
  *  e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
  *  @hw: pointer to the HW structure
  *
- *  Acquire the semaphore to access the EEPROM.  This is a function
- *  pointer entry point called by the api module.
+ *  Acquire the semaphore to access the EEPROM.
  **/
 static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
 {
@@ -356,8 +349,7 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
  *  e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
  *  @hw: pointer to the HW structure
  *
- *  Release the semaphore used to access the EEPROM.  This is a
- *  function pointer entry point called by the api module.
+ *  Release the semaphore used to access the EEPROM.
  **/
 static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
 {
@@ -399,8 +391,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
 	}
 
 	if (i == timeout) {
-		hw_dbg(hw,
-		       "Driver can't access resource, SW_FW_SYNC timeout.\n");
+		e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
 		return -E1000_ERR_SWFW_SYNC;
 	}
 
@@ -440,8 +431,7 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
  *  @offset: offset of the register to read
  *  @data: pointer to the data returned from the operation
  *
- *  Read the GG82563 PHY register.  This is a function pointer entry
- *  point called by the api module.
+ *  Read the GG82563 PHY register.
  **/
 static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 						  u32 offset, u16 *data)
@@ -505,8 +495,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
  *  @offset: offset of the register to read
  *  @data: value to write to the register
  *
- *  Write to the GG82563 PHY register.  This is a function pointer entry
- *  point called by the api module.
+ *  Write to the GG82563 PHY register.
  **/
 static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 						   u32 offset, u16 data)
@@ -571,8 +560,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
  *  @words: number of words to write
  *  @data: buffer of data to write to the NVM
  *
- *  Write "words" of data to the ESB2 NVM.  This is a function
- *  pointer entry point called by the api module.
+ *  Write "words" of data to the ESB2 NVM.
  **/
 static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
 				       u16 words, u16 *data)
@@ -602,7 +590,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
 		timeout--;
 	}
 	if (!timeout) {
-		hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+		e_dbg("MNG configuration cycle has not completed.\n");
 		return -E1000_ERR_RESET;
 	}
 
@@ -635,7 +623,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data);
+	e_dbg("GG82563 PSCR: %X\n", phy_data);
 
 	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
@@ -653,7 +641,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	udelay(1);
 
 	if (hw->phy.autoneg_wait_to_complete) {
-		hw_dbg(hw, "Waiting for forced speed/duplex link "
+		e_dbg("Waiting for forced speed/duplex link "
 			 "on GG82563 phy.\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@@ -712,21 +700,27 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 phy_data;
-	u16 index;
+	s32 ret_val = 0;
+	u16 phy_data, index;
 
 	ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
 	if (ret_val)
-		return ret_val;
+		goto out;
 
 	index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+	if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
 	phy->min_cable_length = e1000_gg82563_cable_length_table[index];
-	phy->max_cable_length = e1000_gg82563_cable_length_table[index+5];
+	phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
 
 	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
 
-	return 0;
+out:
+	return ret_val;
 }
 
 /**
@@ -736,7 +730,6 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
  *  @duplex: pointer to duplex buffer
  *
  *  Retrieve the current speed and duplex configuration.
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 					      u16 *duplex)
@@ -762,12 +755,10 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
  *  @hw: pointer to the HW structure
  *
  *  Perform a global reset to the ESB2 controller.
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl;
-	u32 icr;
+	u32 ctrl, icr;
 	s32 ret_val;
 
 	/*
@@ -776,9 +767,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
 	if (ret_val)
-		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+		e_dbg("PCI-E Master disable polling has failed.\n");
 
-	hw_dbg(hw, "Masking off all interrupts\n");
+	e_dbg("Masking off all interrupts\n");
 	ew32(IMC, 0xffffffff);
 
 	ew32(RCTL, 0);
@@ -790,7 +781,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 	ctrl = er32(CTRL);
 
 	ret_val = e1000_acquire_phy_80003es2lan(hw);
-	hw_dbg(hw, "Issuing a global reset to MAC\n");
+	e_dbg("Issuing a global reset to MAC\n");
 	ew32(CTRL, ctrl | E1000_CTRL_RST);
 	e1000_release_phy_80003es2lan(hw);
 
@@ -811,7 +802,6 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
  *  @hw: pointer to the HW structure
  *
  *  Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
- *  This is a function pointer entry point called by the api module.
  **/
 static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 {
@@ -824,20 +814,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 
 	/* Initialize identification LED */
 	ret_val = e1000e_id_led_init(hw);
-	if (ret_val) {
-		hw_dbg(hw, "Error initializing identification LED\n");
-		return ret_val;
-	}
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
 
 	/* Disabling VLAN filtering */
-	hw_dbg(hw, "Initializing the IEEE VLAN\n");
-	e1000e_clear_vfta(hw);
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);
 
 	/* Setup the receive address. */
 	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
 
 	/* Zero out the Multicast HASH table */
-	hw_dbg(hw, "Zeroing the MTA\n");
+	e_dbg("Zeroing the MTA\n");
 	for (i = 0; i < mac->mta_reg_count; i++)
 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
 
@@ -994,7 +983,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 	/* SW Reset the PHY so all changes take effect */
 	ret_val = e1000e_commit_phy(hw);
 	if (ret_val) {
-		hw_dbg(hw, "Error Resetting the PHY\n");
+		e_dbg("Error Resetting the PHY\n");
 		return ret_val;
 	}
 
@@ -1318,6 +1307,23 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
 }
 
 /**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * When the PHY is powered down to save power, or the link is turned off
+ * during a driver unload, or wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(hw->mac.ops.check_mng_mode(hw) ||
+	      hw->phy.ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
  *  e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
  *  @hw: pointer to the HW structure
  *
@@ -1325,44 +1331,42 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
  **/
 static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
 {
-	u32 temp;
-
 	e1000e_clear_hw_cntrs_base(hw);
 
-	temp = er32(PRC64);
-	temp = er32(PRC127);
-	temp = er32(PRC255);
-	temp = er32(PRC511);
-	temp = er32(PRC1023);
-	temp = er32(PRC1522);
-	temp = er32(PTC64);
-	temp = er32(PTC127);
-	temp = er32(PTC255);
-	temp = er32(PTC511);
-	temp = er32(PTC1023);
-	temp = er32(PTC1522);
-
-	temp = er32(ALGNERRC);
-	temp = er32(RXERRC);
-	temp = er32(TNCRS);
-	temp = er32(CEXTERR);
-	temp = er32(TSCTC);
-	temp = er32(TSCTFC);
-
-	temp = er32(MGTPRC);
-	temp = er32(MGTPDC);
-	temp = er32(MGTPTC);
-
-	temp = er32(IAC);
-	temp = er32(ICRXOC);
-
-	temp = er32(ICRXPTC);
-	temp = er32(ICRXATC);
-	temp = er32(ICTXPTC);
-	temp = er32(ICTXATC);
-	temp = er32(ICTXQEC);
-	temp = er32(ICTXQMTC);
-	temp = er32(ICRXDMTC);
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
 }
 
 static struct e1000_mac_operations es2_mac_ops = {
@@ -1376,6 +1380,8 @@ static struct e1000_mac_operations es2_mac_ops = {
 	.led_on			= e1000e_led_on_generic,
 	.led_off		= e1000e_led_off_generic,
 	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.write_vfta		= e1000_write_vfta_generic,
+	.clear_vfta		= e1000_clear_vfta_generic,
 	.reset_hw		= e1000_reset_hw_80003es2lan,
 	.init_hw		= e1000_init_hw_80003es2lan,
 	.setup_link		= e1000e_setup_link,
@@ -1384,30 +1390,31 @@ static struct e1000_mac_operations es2_mac_ops = {
 };
 
 static struct e1000_phy_operations es2_phy_ops = {
-	.acquire_phy		= e1000_acquire_phy_80003es2lan,
+	.acquire		= e1000_acquire_phy_80003es2lan,
+	.check_polarity		= e1000_check_polarity_m88,
 	.check_reset_block	= e1000e_check_reset_block_generic,
-	.commit_phy	 	= e1000e_phy_sw_reset,
+	.commit		 	= e1000e_phy_sw_reset,
 	.force_speed_duplex 	= e1000_phy_force_speed_duplex_80003es2lan,
 	.get_cfg_done       	= e1000_get_cfg_done_80003es2lan,
 	.get_cable_length   	= e1000_get_cable_length_80003es2lan,
-	.get_phy_info       	= e1000e_get_phy_info_m88,
-	.read_phy_reg       	= e1000_read_phy_reg_gg82563_80003es2lan,
-	.release_phy		= e1000_release_phy_80003es2lan,
-	.reset_phy	  	= e1000e_phy_hw_reset_generic,
+	.get_info       	= e1000e_get_phy_info_m88,
+	.read_reg       	= e1000_read_phy_reg_gg82563_80003es2lan,
+	.release		= e1000_release_phy_80003es2lan,
+	.reset		  	= e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state  	= NULL,
 	.set_d3_lplu_state  	= e1000e_set_d3_lplu_state,
-	.write_phy_reg      	= e1000_write_phy_reg_gg82563_80003es2lan,
+	.write_reg      	= e1000_write_phy_reg_gg82563_80003es2lan,
 	.cfg_on_link_up      	= e1000_cfg_on_link_up_80003es2lan,
 };
 
 static struct e1000_nvm_operations es2_nvm_ops = {
-	.acquire_nvm		= e1000_acquire_nvm_80003es2lan,
-	.read_nvm		= e1000e_read_nvm_eerd,
-	.release_nvm		= e1000_release_nvm_80003es2lan,
-	.update_nvm		= e1000e_update_nvm_checksum_generic,
+	.acquire		= e1000_acquire_nvm_80003es2lan,
+	.read			= e1000e_read_nvm_eerd,
+	.release		= e1000_release_nvm_80003es2lan,
+	.update			= e1000e_update_nvm_checksum_generic,
 	.valid_led_default	= e1000e_valid_led_default,
-	.validate_nvm		= e1000e_validate_nvm_checksum_generic,
-	.write_nvm		= e1000_write_nvm_80003es2lan,
+	.validate		= e1000e_validate_nvm_checksum_generic,
+	.write			= e1000_write_nvm_80003es2lan,
 };
 
 struct e1000_info e1000_es2_info = {
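
One functional change in es2lan.c above is the bounds check added to e1000_get_cable_length_80003es2lan(): the index derived from the PHY register is rejected if reading table[index + 5] would run past e1000_gg82563_cable_length_table. The short stand-alone C sketch below reproduces that guard with the same table values; the lookup() helper and the sample indices are illustrative assumptions, not driver code.

#include <stdio.h>

static const unsigned short cable_len[] =
	{ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
#define TABLE_SIZE (sizeof(cable_len) / sizeof(cable_len[0]))

/* Return 0 and fill min/max, or -1 if index + 5 would overrun the table. */
static int lookup(unsigned int index, unsigned short *min, unsigned short *max)
{
	if (index >= TABLE_SIZE - 5)
		return -1;

	*min = cable_len[index];
	*max = cable_len[index + 5];
	return 0;
}

int main(void)
{
	unsigned short lo, hi;

	if (lookup(3, &lo, &hi) == 0)
		printf("cable length between %u and %u meters\n", lo, hi);
	if (lookup(9, &lo, &hi) != 0)
		printf("index 9 rejected: would read past the table\n");
	return 0;
}
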
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e82638ecae88..0aa50c229c79 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -35,14 +35,22 @@
 
 #include "e1000.h"
 
+enum {NETDEV_STATS, E1000_STATS};
+
 struct e1000_stats {
 	char stat_string[ETH_GSTRING_LEN];
+	int type;
 	int sizeof_stat;
 	int stat_offset;
 };
 
-#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
-		      offsetof(struct e1000_adapter, m)
+#define E1000_STAT(m)		E1000_STATS, \
+				sizeof(((struct e1000_adapter *)0)->m), \
+		      		offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
+				sizeof(((struct net_device *)0)->m), \
+				offsetof(struct net_device, m)
+
 static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_packets", E1000_STAT(stats.gprc) },
 	{ "tx_packets", E1000_STAT(stats.gptc) },
@@ -52,21 +60,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "tx_broadcast", E1000_STAT(stats.bptc) },
 	{ "rx_multicast", E1000_STAT(stats.mprc) },
 	{ "tx_multicast", E1000_STAT(stats.mptc) },
-	{ "rx_errors", E1000_STAT(net_stats.rx_errors) },
-	{ "tx_errors", E1000_STAT(net_stats.tx_errors) },
-	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+	{ "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) },
+	{ "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) },
+	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
 	{ "multicast", E1000_STAT(stats.mprc) },
 	{ "collisions", E1000_STAT(stats.colc) },
-	{ "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
-	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+	{ "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) },
+	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
 	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
-	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
 	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
 	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
 	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
 	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
-	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
-	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
 	{ "tx_window_errors", E1000_STAT(stats.latecol) },
 	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
 	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -182,6 +190,17 @@ static int e1000_get_settings(struct net_device *netdev,
 static u32 e1000_get_link(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+
+	/*
+	 * If the link is not reported up to netdev, interrupts are disabled,
+	 * and so the physical link state may have changed since we last
+	 * looked. Set get_link_status to make sure that the true link
+	 * state is interrogated, rather than pulling a cached and possibly
+	 * stale link state from the driver.
+	 */
+	if (!netif_carrier_ok(netdev))
+		mac->get_link_status = 1;
 
 	return e1000_has_link(adapter);
 }
@@ -516,7 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
 
 	if (ret_val) {
 		/* a read error occurred, throw away the result */
-		memset(eeprom_buff, 0xff, sizeof(eeprom_buff));
+		memset(eeprom_buff, 0xff, sizeof(u16) *
+		       (last_word - first_word + 1));
 	} else {
 		/* Device's eeprom is always little-endian, word addressable */
 		for (i = 0; i < last_word - first_word + 1; i++)
@@ -596,7 +616,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
 	 * and flush shadow RAM for applicable controllers
 	 */
 	if ((first_word <= NVM_CHECKSUM_REG) ||
-	    (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573))
+	    (hw->mac.type == e1000_82583) ||
+	    (hw->mac.type == e1000_82574) ||
+	    (hw->mac.type == e1000_82573))
 		ret_val = e1000e_update_nvm_checksum(hw);
 
 out:
@@ -929,10 +951,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 		e1000e_set_interrupt_capability(adapter);
 	}
 	/* Hook up test interrupt handler just for this test */
-	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
 			 netdev)) {
 		shared_int = 0;
-	} else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	} else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
 		 netdev->name, netdev)) {
 		*data = 1;
 		ret_val = -1;
@@ -1239,6 +1261,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	hw->mac.autoneg = 0;
 
+	/* Workaround: K1 must be disabled for stable 1Gbps operation */
+	if (hw->mac.type == e1000_pchlan)
+		e1000_configure_k1_ich8lan(hw, false);
+
 	if (hw->phy.type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
 		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1769,12 +1795,11 @@ static int e1000_set_wol(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if (wol->wolopts & WAKE_MAGICSECURE)
-		return -EOPNOTSUPP;
-
 	if (!(adapter->flags & FLAG_HAS_WOL) ||
-	    !device_can_wakeup(&adapter->pdev->dev))
-		return wol->wolopts ? -EOPNOTSUPP : 0;
+	    !device_can_wakeup(&adapter->pdev->dev) ||
+	    (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+	                      WAKE_MAGIC | WAKE_PHY | WAKE_ARP)))
+		return -EOPNOTSUPP;
 
 	/* these settings will always override what we currently have */
 	adapter->wol = 0;
@@ -1832,6 +1857,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
 
 	if ((hw->phy.type == e1000_phy_ife) ||
 	    (hw->mac.type == e1000_pchlan) ||
+	    (hw->mac.type == e1000_82583) ||
 	    (hw->mac.type == e1000_82574)) {
 		INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
 		if (!adapter->blink_timer.function) {
@@ -1912,10 +1938,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
+	char *p = NULL;
 
 	e1000e_update_stats(adapter);
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+		switch (e1000_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *) netdev +
+					e1000_gstrings_stats[i].stat_offset;
+			break;
+		case E1000_STATS:
+			p = (char *) adapter +
+					e1000_gstrings_stats[i].stat_offset;
+			break;
+		}
+
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
@@ -1975,6 +2012,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 	.get_sset_count		= e1000e_get_sset_count,
 	.get_coalesce		= e1000_get_coalesce,
 	.set_coalesce		= e1000_set_coalesce,
+	.get_flags		= ethtool_op_get_flags,
+	.set_flags		= ethtool_op_set_flags,
 };
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
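
The ethtool statistics rework above tags every table entry with a source (NETDEV_STATS or E1000_STATS) plus an offsetof()-computed byte offset, so e1000_get_ethtool_stats() can choose the base pointer (netdev or adapter) per entry now that net_stats has moved out of the private struct. The stand-alone C sketch below models that two-source table with simplified stand-in structs; the struct layouts and values are illustrative assumptions, only the offsetof technique mirrors the driver.

#include <stddef.h>
#include <stdio.h>

enum stat_type { NETDEV_STATS, ADAPTER_STATS };

struct fake_netdev  { unsigned long rx_errors; };
struct fake_adapter { unsigned long crcerrs;   };

struct stat_desc {
	const char    *name;
	enum stat_type type;
	size_t         offset;
};

static const struct stat_desc stats[] = {
	{ "rx_errors",     NETDEV_STATS,  offsetof(struct fake_netdev,  rx_errors) },
	{ "rx_crc_errors", ADAPTER_STATS, offsetof(struct fake_adapter, crcerrs)   },
};

int main(void)
{
	struct fake_netdev  nd = { .rx_errors = 3 };
	struct fake_adapter ad = { .crcerrs   = 7 };

	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		/* Pick the base struct this entry's offset applies to. */
		const char *base = (stats[i].type == NETDEV_STATS)
					? (const char *)&nd
					: (const char *)&ad;
		unsigned long val =
			*(const unsigned long *)(base + stats[i].offset);

		printf("%-14s = %lu\n", stats[i].name, val);
	}
	return 0;
}
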
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index aaea41ef794d..a7d08dae79c4 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -219,7 +219,7 @@ enum e1e_registers {
 	E1000_HICR      = 0x08F00, /* Host Interface Control */
 };
 
-/* RSS registers */
+#define E1000_MAX_PHY_ADDR		4
 
 /* IGP01E1000 Specific Registers */
 #define IGP01E1000_PHY_PORT_CONFIG	0x10 /* Port Config */
@@ -356,6 +356,7 @@ enum e1e_registers {
 #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT	0x10BA
 #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT	0x10BB
 
+#define E1000_DEV_ID_ICH8_82567V_3		0x1501
 #define E1000_DEV_ID_ICH8_IGP_M_AMT		0x1049
 #define E1000_DEV_ID_ICH8_IGP_AMT		0x104A
 #define E1000_DEV_ID_ICH8_IGP_C			0x104B
@@ -741,6 +742,7 @@ struct e1000_mac_operations {
 	s32  (*check_for_link)(struct e1000_hw *);
 	s32  (*cleanup_led)(struct e1000_hw *);
 	void (*clear_hw_cntrs)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
 	s32  (*get_bus_info)(struct e1000_hw *);
 	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
 	s32  (*led_on)(struct e1000_hw *);
@@ -751,38 +753,41 @@ struct e1000_mac_operations {
 	s32  (*setup_link)(struct e1000_hw *);
 	s32  (*setup_physical_interface)(struct e1000_hw *);
 	s32  (*setup_led)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
 };
 
 /* Function pointers for the PHY. */
 struct e1000_phy_operations {
-	s32  (*acquire_phy)(struct e1000_hw *);
+	s32  (*acquire)(struct e1000_hw *);
+	s32  (*cfg_on_link_up)(struct e1000_hw *);
 	s32  (*check_polarity)(struct e1000_hw *);
 	s32  (*check_reset_block)(struct e1000_hw *);
-	s32  (*commit_phy)(struct e1000_hw *);
+	s32  (*commit)(struct e1000_hw *);
 	s32  (*force_speed_duplex)(struct e1000_hw *);
 	s32  (*get_cfg_done)(struct e1000_hw *hw);
 	s32  (*get_cable_length)(struct e1000_hw *);
-	s32  (*get_phy_info)(struct e1000_hw *);
-	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
-	s32  (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
-	void (*release_phy)(struct e1000_hw *);
-	s32  (*reset_phy)(struct e1000_hw *);
+	s32  (*get_info)(struct e1000_hw *);
+	s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32  (*reset)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
-	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
-	s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
-	s32  (*cfg_on_link_up)(struct e1000_hw *);
+	s32  (*write_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
+	void (*power_up)(struct e1000_hw *);
+	void (*power_down)(struct e1000_hw *);
 };
 
 /* Function pointers for the NVM. */
 struct e1000_nvm_operations {
-	s32  (*acquire_nvm)(struct e1000_hw *);
-	s32  (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
-	void (*release_nvm)(struct e1000_hw *);
-	s32  (*update_nvm)(struct e1000_hw *);
+	s32  (*acquire)(struct e1000_hw *);
+	s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32  (*update)(struct e1000_hw *);
 	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
-	s32  (*validate_nvm)(struct e1000_hw *);
-	s32  (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
+	s32  (*validate)(struct e1000_hw *);
+	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
 };
 
 struct e1000_mac_info {
@@ -925,15 +930,4 @@ struct e1000_hw {
 	} dev_spec;
 };
 
-#ifdef DEBUG
-#define hw_dbg(hw, format, arg...) \
-	printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg)
-#else
-static inline int __attribute__ ((format (printf, 2, 3)))
-hw_dbg(struct e1000_hw *hw, const char *format, ...)
-{
-	return 0;
-}
-#endif
-
 #endif
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index eff3f4783655..7b33be98a2ca 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -54,11 +54,6 @@
  * 82578DC Gigabit Network Connection
  */
 
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"
 
 #define ICH_FLASH_GFPREG		0x0000
@@ -200,7 +195,6 @@ union ich8_flash_protected_range {
 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
-static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 						u32 offset, u8 byte);
@@ -222,9 +216,9 @@ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
-static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -265,26 +259,37 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 	phy->addr                     = 1;
 	phy->reset_delay_us           = 100;
 
-	phy->ops.check_polarity       = e1000_check_polarity_ife_ich8lan;
-	phy->ops.read_phy_reg         = e1000_read_phy_reg_hv;
-	phy->ops.read_phy_reg_locked  = e1000_read_phy_reg_hv_locked;
+	phy->ops.read_reg             = e1000_read_phy_reg_hv;
+	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
-	phy->ops.write_phy_reg        = e1000_write_phy_reg_hv;
-	phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
+	phy->ops.write_reg            = e1000_write_phy_reg_hv;
+	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
+	phy->ops.power_up             = e1000_power_up_phy_copper;
+	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
 	e1000e_get_phy_id(hw);
 	phy->type = e1000e_get_phy_type_from_id(phy->id);
 
-	if (phy->type == e1000_phy_82577) {
+	switch (phy->type) {
+	case e1000_phy_82577:
 		phy->ops.check_polarity = e1000_check_polarity_82577;
 		phy->ops.force_speed_duplex =
 			e1000_phy_force_speed_duplex_82577;
-		phy->ops.get_cable_length   = e1000_get_cable_length_82577;
-		phy->ops.get_phy_info = e1000_get_phy_info_82577;
-		phy->ops.commit_phy = e1000e_phy_sw_reset;
+		phy->ops.get_cable_length = e1000_get_cable_length_82577;
+		phy->ops.get_info = e1000_get_phy_info_82577;
+		phy->ops.commit = e1000e_phy_sw_reset;
+	case e1000_phy_82578:
+		phy->ops.check_polarity = e1000_check_polarity_m88;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
+		phy->ops.get_info = e1000e_get_phy_info_m88;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
 	}
 
 	return ret_val;
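
In e1000_init_phy_params_pchlan() the single if (phy->type == e1000_phy_82577) block is replaced by a switch that installs a handler set per PHY type. As rendered in this hunk there is no break at the end of the e1000_phy_82577 case, so for an 82577 PHY control falls through into the e1000_phy_82578 case and the m88 handlers assigned there are the ones left installed; whether that fall-through is intended cannot be judged from the diff alone. The toy program below, with names invented for the illustration, only demonstrates that C switch behaviour.

    #include <stdio.h>

    enum phy_type { PHY_82577, PHY_82578 };

    struct phy { void (*get_info)(void); };

    static void get_info_82577(void) { puts("82577 handler"); }
    static void get_info_m88(void)   { puts("m88 handler");   }

    static void init_ops(struct phy *phy, enum phy_type type)
    {
            switch (type) {
            case PHY_82577:
                    phy->get_info = get_info_82577;
                    /* no break: execution continues into the next case */
            case PHY_82578:
                    phy->get_info = get_info_m88;
                    break;
            }
    }

    int main(void)
    {
            struct phy phy;

            init_ops(&phy, PHY_82577);
            phy.get_info();  /* prints "m88 handler", not "82577 handler" */
            return 0;
    }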
@@ -305,17 +310,22 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 	phy->addr			= 1;
 	phy->reset_delay_us		= 100;
 
+	phy->ops.power_up               = e1000_power_up_phy_copper;
+	phy->ops.power_down             = e1000_power_down_phy_copper_ich8lan;
+
 	/*
 	 * We may need to do this twice - once for IGP and if that fails,
 	 * we'll set BM func pointers and try again
 	 */
 	ret_val = e1000e_determine_phy_address(hw);
 	if (ret_val) {
-		hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
-		hw->phy.ops.read_phy_reg  = e1000e_read_phy_reg_bm;
+		phy->ops.write_reg = e1000e_write_phy_reg_bm;
+		phy->ops.read_reg  = e1000e_read_phy_reg_bm;
 		ret_val = e1000e_determine_phy_address(hw);
-		if (ret_val)
+		if (ret_val) {
+			e_dbg("Cannot determine PHY addr. Erroring out\n");
 			return ret_val;
+		}
 	}
 
 	phy->id = 0;
@@ -332,29 +342,36 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-		phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
-		phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
+		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
+		phy->ops.get_info = e1000e_get_phy_info_igp;
+		phy->ops.check_polarity = e1000_check_polarity_igp;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
 		break;
 	case IFE_E_PHY_ID:
 	case IFE_PLUS_E_PHY_ID:
 	case IFE_C_E_PHY_ID:
 		phy->type = e1000_phy_ife;
 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
+		phy->ops.get_info = e1000_get_phy_info_ife;
+		phy->ops.check_polarity = e1000_check_polarity_ife;
+		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
 		break;
 	case BME1000_E_PHY_ID:
 		phy->type = e1000_phy_bm;
 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-		hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
-		hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
-		hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
+		phy->ops.read_reg = e1000e_read_phy_reg_bm;
+		phy->ops.write_reg = e1000e_write_phy_reg_bm;
+		phy->ops.commit = e1000e_phy_sw_reset;
+		phy->ops.get_info = e1000e_get_phy_info_m88;
+		phy->ops.check_polarity = e1000_check_polarity_m88;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 		break;
 	default:
 		return -E1000_ERR_PHY;
 		break;
 	}
 
-	phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
-
 	return 0;
 }
 
@@ -374,7 +391,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 
 	/* Can't read flash registers if the register set isn't mapped. */
 	if (!hw->flash_address) {
-		hw_dbg(hw, "ERROR: Flash registers not mapped\n");
+		e_dbg("ERROR: Flash registers not mapped\n");
 		return -E1000_ERR_CONFIG;
 	}
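
This and the following hunks replace hw_dbg(hw, ...) with e_dbg(...), which no longer takes the hw pointer as an explicit argument; the macro itself lives in the driver's e1000.h and is not part of this diff. As a rough illustration of that style of logging macro, here is a standalone sketch (a GNU C variadic macro, with names invented for the example) in which the context comes from the enclosing scope rather than from a parameter.

    #include <stdio.h>

    /*
     * Minimal stand-in for a debug macro that picks up its context (here a
     * device name) from the enclosing scope rather than an explicit argument.
     */
    #define dbg_example(fmt, ...) \
            fprintf(stderr, "%s: " fmt, dev_name, ##__VA_ARGS__)

    int main(void)
    {
            const char *dev_name = "eth0";

            dbg_example("NVM read error: %d\n", -1);
            dbg_example("Masking off all interrupts\n");
            return 0;
    }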
 
@@ -407,7 +424,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 
 	/* Clear shadow ram */
 	for (i = 0; i < nvm->word_size; i++) {
-		dev_spec->shadow_ram[i].modified = 0;
+		dev_spec->shadow_ram[i].modified = false;
 		dev_spec->shadow_ram[i].value    = 0xFFFF;
 	}
 
@@ -436,7 +453,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
 	if (mac->type == e1000_ich8lan)
 		mac->rar_entry_count--;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid = 1;
+	mac->arc_subsystem_valid = true;
 
 	/* LED operations */
 	switch (mac->type) {
@@ -470,7 +487,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
 
 	/* Enable PCS Lock-loss workaround for ICH8 */
 	if (mac->type == e1000_ich8lan)
-		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1);
+		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 
 	return 0;
 }
@@ -556,7 +573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
 	if (ret_val)
-		hw_dbg(hw, "Error configuring flow control\n");
+		e_dbg("Error configuring flow control\n");
 
 out:
 	return ret_val;
@@ -636,8 +653,6 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
 	s32 ret_val = 0;
 
-	might_sleep();
-
 	mutex_lock(&swflag_mutex);
 
 	while (timeout) {
@@ -650,7 +665,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 	}
 
 	if (!timeout) {
-		hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n");
+		e_dbg("SW/FW/HW has locked the resource for too long.\n");
 		ret_val = -E1000_ERR_CONFIG;
 		goto out;
 	}
@@ -670,7 +685,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 	}
 
 	if (!timeout) {
-		hw_dbg(hw, "Failed to acquire the semaphore.\n");
+		e_dbg("Failed to acquire the semaphore.\n");
 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 		ew32(EXTCNF_CTRL, extcnf_ctrl);
 		ret_val = -E1000_ERR_CONFIG;
@@ -714,7 +729,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
  **/
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
 {
-	u32 fwsm = er32(FWSM);
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
 
 	return (fwsm & E1000_FWSM_MODE_MASK) ==
 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
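
e1000_check_mng_mode_ich8lan() tests a mode field inside the FWSM register by masking it and comparing against a shifted constant. The snippet below shows the same mask-and-compare idiom with made-up register layout values; the real E1000_FWSM_MODE_MASK, E1000_FWSM_MODE_SHIFT and E1000_ICH_MNG_IAMT_MODE constants are defined elsewhere in the driver and are not reproduced here.

    #include <stdio.h>

    #define MODE_MASK   0x0000001C  /* example field: bits 4:2 (invented) */
    #define MODE_SHIFT  2
    #define IAMT_MODE   0x3

    /* Check whether the mode field inside a status word equals a value. */
    static int mode_is_iamt(unsigned fwsm)
    {
            return (fwsm & MODE_MASK) == (IAMT_MODE << MODE_SHIFT);
    }

    int main(void)
    {
            printf("%d\n", mode_is_iamt(0x0000000C));  /* field = 3 -> 1 */
            printf("%d\n", mode_is_iamt(0x00000008));  /* field = 2 -> 0 */
            return 0;
    }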
@@ -738,77 +755,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex
- *  @hw: pointer to the HW structure
- *
- *  Forces the speed and duplex settings of the PHY.
- *  This is a function pointer entry point only called by
- *  PHY setup routines.
- **/
-static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 data;
-	bool link;
-
-	if (phy->type != e1000_phy_ife) {
-		ret_val = e1000e_phy_force_speed_duplex_igp(hw);
-		return ret_val;
-	}
-
-	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
-	if (ret_val)
-		return ret_val;
-
-	e1000e_phy_force_speed_duplex_setup(hw, &data);
-
-	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
-	if (ret_val)
-		return ret_val;
-
-	/* Disable MDI-X support for 10/100 */
-	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
-	if (ret_val)
-		return ret_val;
-
-	data &= ~IFE_PMC_AUTO_MDIX;
-	data &= ~IFE_PMC_FORCE_MDIX;
-
-	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
-	if (ret_val)
-		return ret_val;
-
-	hw_dbg(hw, "IFE PMC: %X\n", data);
-
-	udelay(1);
-
-	if (phy->autoneg_wait_to_complete) {
-		hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n");
-
-		ret_val = e1000e_phy_has_link_generic(hw,
-						     PHY_FORCE_LIMIT,
-						     100000,
-						     &link);
-		if (ret_val)
-			return ret_val;
-
-		if (!link)
-			hw_dbg(hw, "Link taking longer than expected.\n");
-
-		/* Try once more */
-		ret_val = e1000e_phy_has_link_generic(hw,
-						     PHY_FORCE_LIMIT,
-						     100000,
-						     &link);
-		if (ret_val)
-			return ret_val;
-	}
-
-	return 0;
-}
-
-/**
  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
  *  @hw:   pointer to the HW structure
  *
@@ -822,7 +768,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -918,7 +864,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 			reg_addr &= PHY_REG_MASK;
 			reg_addr |= phy_page;
 
-			ret_val = phy->ops.write_phy_reg_locked(hw,
+			ret_val = phy->ops.write_reg_locked(hw,
 			                                    (u32)reg_addr,
 			                                    reg_data);
 			if (ret_val)
@@ -927,7 +873,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 	}
 
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 	return ret_val;
 }
 
@@ -951,15 +897,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
 		goto out;
 
 	/* Wrap the whole flow with the sw flag */
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		goto out;
 
 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
 	if (link) {
 		if (hw->phy.type == e1000_phy_82578) {
-			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
-			                                          BM_CS_STATUS,
+			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
 			                                          &status_reg);
 			if (ret_val)
 				goto release;
@@ -975,8 +920,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
 		}
 
 		if (hw->phy.type == e1000_phy_82577) {
-			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
-			                                          HV_M_STATUS,
+			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
 			                                          &status_reg);
 			if (ret_val)
 				goto release;
@@ -992,14 +936,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
 		}
 
 		/* Link stall fix for link up */
-		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
 		                                           0x0100);
 		if (ret_val)
 			goto release;
 
 	} else {
 		/* Link stall fix for link down */
-		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
 		                                           0x4100);
 		if (ret_val)
 			goto release;
@@ -1008,7 +952,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
 
 release:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 out:
 	return ret_val;
 }
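
e1000_k1_gig_workaround_hv() in the hunks above wraps its PHY accesses in phy.ops.acquire()/release() and unwinds with the kernel's usual goto labels: any failure after the acquire jumps to release so the semaphore is always dropped, and both paths converge on out. A minimal standalone sketch of that control-flow shape, with placeholder helpers rather than the driver's functions:

    #include <stdio.h>

    /* Placeholder "hardware" helpers for the sketch. */
    static int acquire(void)       { puts("acquire"); return 0; }
    static void release(void)      { puts("release"); }
    static int read_step(int fail) { return fail ? -1 : 0; }

    /*
     * Take the resource, bail to the "release" label on any failure so the
     * resource is always dropped, then fall through to "out" for the common
     * return path.
     */
    static int do_workaround(int fail_mid)
    {
            int ret;

            ret = acquire();
            if (ret)
                    goto out;

            ret = read_step(fail_mid);
            if (ret)
                    goto release;

            ret = read_step(0);
            if (ret)
                    goto release;

    release:
            release();
    out:
            return ret;
    }

    int main(void)
    {
            printf("ok path: %d\n", do_workaround(0));
            printf("error path: %d\n", do_workaround(1));
            return 0;
    }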
@@ -1023,7 +967,7 @@ out:
  *
  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
  **/
-static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
 {
 	s32 ret_val = 0;
 	u32 ctrl_reg = 0;
@@ -1084,7 +1028,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
 	if (hw->mac.type != e1000_pchlan)
 		return ret_val;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -1098,7 +1042,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
 
 	mac_reg = er32(PHY_CTRL);
 
-	ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
 	if (ret_val)
 		goto out;
 
@@ -1120,10 +1064,10 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
 	/* Restart auto-neg to activate the bits */
 	if (!e1000_check_reset_block(hw))
 		oem_reg |= HV_OEM_BITS_RESTART_AN;
-	ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
+	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
 
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 
 	return ret_val;
 }
@@ -1166,7 +1110,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 	}
 
 	/* Select page 0 */
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -1174,7 +1118,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
 	if (ret_val)
 		goto out;
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 
 	/*
 	 * Configure the K1 Si workaround during phy reset assuming there is
@@ -1210,7 +1154,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
 	 * leave the PHY in a bad state possibly resulting in no link.
 	 */
 	if (loop == 0)
-		hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
+		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
 
 	/* Clear the Init Done bit for the next init event */
 	data = er32(STATUS);
@@ -1262,122 +1206,6 @@ out:
 }
 
 /**
- *  e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
- *  @hw: pointer to the HW structure
- *
- *  Populates "phy" structure with various feature states.
- *  This function is only called by other family-specific
- *  routines.
- **/
-static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 data;
-	bool link;
-
-	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
-	if (ret_val)
-		return ret_val;
-
-	if (!link) {
-		hw_dbg(hw, "Phy info is only valid if link is up\n");
-		return -E1000_ERR_CONFIG;
-	}
-
-	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
-	if (ret_val)
-		return ret_val;
-	phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE));
-
-	if (phy->polarity_correction) {
-		ret_val = phy->ops.check_polarity(hw);
-		if (ret_val)
-			return ret_val;
-	} else {
-		/* Polarity is forced */
-		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
-				      ? e1000_rev_polarity_reversed
-				      : e1000_rev_polarity_normal;
-	}
-
-	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
-	if (ret_val)
-		return ret_val;
-
-	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS);
-
-	/* The following parameters are undefined for 10/100 operation. */
-	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
-	phy->local_rx = e1000_1000t_rx_status_undefined;
-	phy->remote_rx = e1000_1000t_rx_status_undefined;
-
-	return 0;
-}
-
-/**
- *  e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
- *  @hw: pointer to the HW structure
- *
- *  Wrapper for calling the get_phy_info routines for the appropriate phy type.
- *  This is a function pointer entry point called by drivers
- *  or other shared routines.
- **/
-static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
-{
-	switch (hw->phy.type) {
-	case e1000_phy_ife:
-		return e1000_get_phy_info_ife_ich8lan(hw);
-		break;
-	case e1000_phy_igp_3:
-	case e1000_phy_bm:
-	case e1000_phy_82578:
-	case e1000_phy_82577:
-		return e1000e_get_phy_info_igp(hw);
-		break;
-	default:
-		break;
-	}
-
-	return -E1000_ERR_PHY_TYPE;
-}
-
-/**
- *  e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
- *  @hw: pointer to the HW structure
- *
- *  Polarity is determined on the polarity reversal feature being enabled.
- *  This function is only called by other family-specific
- *  routines.
- **/
-static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 phy_data, offset, mask;
-
-	/*
-	 * Polarity is determined based on the reversal feature being enabled.
-	 */
-	if (phy->polarity_correction) {
-		offset	= IFE_PHY_EXTENDED_STATUS_CONTROL;
-		mask	= IFE_PESC_POLARITY_REVERSED;
-	} else {
-		offset	= IFE_PHY_SPECIAL_CONTROL;
-		mask	= IFE_PSC_FORCE_POLARITY;
-	}
-
-	ret_val = e1e_rphy(hw, offset, &phy_data);
-
-	if (!ret_val)
-		phy->cable_polarity = (phy_data & mask)
-				      ? e1000_rev_polarity_reversed
-				      : e1000_rev_polarity_normal;
-
-	return ret_val;
-}
-
-/**
  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
  *  @hw: pointer to the HW structure
  *  @active: true to enable LPLU, false to disable
@@ -1412,7 +1240,7 @@ out:
 /**
  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
  *  @hw: pointer to the HW structure
- *  @active: TRUE to enable LPLU, FALSE to disable
+ *  @active: true to enable LPLU, false to disable
  *
  *  Sets the LPLU D0 state according to the active flag.  When
  *  activating LPLU this function also disables smart speed
@@ -1498,7 +1326,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
 /**
  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
  *  @hw: pointer to the HW structure
- *  @active: TRUE to enable LPLU, FALSE to disable
+ *  @active: true to enable LPLU, false to disable
  *
  *  Sets the LPLU D3 state according to the active flag.  When
  *  activating LPLU this function also disables smart speed
@@ -1611,7 +1439,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 
 			return 0;
 		}
-		hw_dbg(hw, "Unable to determine valid NVM bank via EEC - "
+		e_dbg("Unable to determine valid NVM bank via EEC - "
 		       "reading flash signature\n");
 		/* fall-thru */
 	default:
@@ -1641,7 +1469,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 			return 0;
 		}
 
-		hw_dbg(hw, "ERROR: No valid NVM bank present\n");
+		e_dbg("ERROR: No valid NVM bank present\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -1669,16 +1497,16 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
 	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+		e_dbg("nvm parameter(s) out of bounds\n");
 		ret_val = -E1000_ERR_NVM;
 		goto out;
 	}
 
-	nvm->ops.acquire_nvm(hw);
+	nvm->ops.acquire(hw);
 
 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 	if (ret_val) {
-		hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
+		e_dbg("Could not detect valid bank, assuming bank 0\n");
 		bank = 0;
 	}
 
@@ -1700,11 +1528,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 		}
 	}
 
-	nvm->ops.release_nvm(hw);
+	nvm->ops.release(hw);
 
 out:
 	if (ret_val)
-		hw_dbg(hw, "NVM read error: %d\n", ret_val);
+		e_dbg("NVM read error: %d\n", ret_val);
 
 	return ret_val;
 }
@@ -1726,7 +1554,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 
 	/* Check if the flash descriptor is valid */
 	if (hsfsts.hsf_status.fldesvalid == 0) {
-		hw_dbg(hw, "Flash descriptor invalid.  "
+		e_dbg("Flash descriptor invalid.  "
 			 "SW Sequencing must be used.");
 		return -E1000_ERR_NVM;
 	}
@@ -1749,7 +1577,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 	if (hsfsts.hsf_status.flcinprog == 0) {
 		/*
 		 * There is no cycle running at present,
-		 * so we can start a cycle
+		 * so we can start a cycle.
 		 * Begin by setting Flash Cycle Done.
 		 */
 		hsfsts.hsf_status.flcdone = 1;
@@ -1757,7 +1585,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 		ret_val = 0;
 	} else {
 		/*
-		 * otherwise poll for sometime so the current
+		 * Otherwise poll for sometime so the current
 		 * cycle has a chance to end before giving up.
 		 */
 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
@@ -1776,7 +1604,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 			hsfsts.hsf_status.flcdone = 1;
 			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 		} else {
-			hw_dbg(hw, "Flash controller busy, cannot get access");
+			e_dbg("Flash controller busy, cannot get access");
 		}
 	}
 
@@ -1926,7 +1754,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 				/* Repeat for some time before giving up. */
 				continue;
 			} else if (hsfsts.hsf_status.flcdone == 0) {
-				hw_dbg(hw, "Timeout error - flash cycle "
+				e_dbg("Timeout error - flash cycle "
 					 "did not complete.");
 				break;
 			}
@@ -1954,18 +1782,18 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
 	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+		e_dbg("nvm parameter(s) out of bounds\n");
 		return -E1000_ERR_NVM;
 	}
 
-	nvm->ops.acquire_nvm(hw);
+	nvm->ops.acquire(hw);
 
 	for (i = 0; i < words; i++) {
-		dev_spec->shadow_ram[offset+i].modified = 1;
+		dev_spec->shadow_ram[offset+i].modified = true;
 		dev_spec->shadow_ram[offset+i].value = data[i];
 	}
 
-	nvm->ops.release_nvm(hw);
+	nvm->ops.release(hw);
 
 	return 0;
 }
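
e1000_write_nvm_ich8lan() does not touch the flash at all: it only marks the affected shadow_ram[] words as modified and records the new values, and the actual flash programming happens later in e1000_update_nvm_checksum_ich8lan(), which also clears the modified flags once the commit succeeds. A small self-contained sketch of that stage-then-commit scheme (the structure and function names are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define NWORDS 8

    /* One cached NVM word: value plus a dirty ("modified") flag. */
    struct shadow_word {
            bool           modified;
            unsigned short value;
    };

    static struct shadow_word shadow[NWORDS];

    /* "Write" only stages the data in the shadow copy. */
    static void nvm_write(unsigned offset, unsigned short data)
    {
            shadow[offset].modified = true;
            shadow[offset].value    = data;
    }

    /* A later commit walks the shadow copy and pushes dirty words out. */
    static void nvm_commit(void)
    {
            for (unsigned i = 0; i < NWORDS; i++) {
                    if (!shadow[i].modified)
                            continue;
                    printf("flash word %u <- 0x%04x\n", i, shadow[i].value);
                    shadow[i].modified = false;
                    shadow[i].value    = 0xFFFF;
            }
    }

    int main(void)
    {
            nvm_write(2, 0xBEEF);
            nvm_write(5, 0x1234);
            nvm_commit();
            return 0;
    }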
@@ -1996,7 +1824,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	if (nvm->type != e1000_nvm_flash_sw)
 		goto out;
 
-	nvm->ops.acquire_nvm(hw);
+	nvm->ops.acquire(hw);
 
 	/*
 	 * We're writing to the opposite bank so if we're on bank 1,
@@ -2005,7 +1833,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	 */
 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 	if (ret_val) {
-		hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
+		e_dbg("Could not detect valid bank, assuming bank 0\n");
 		bank = 0;
 	}
 
@@ -2014,7 +1842,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		old_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
 		if (ret_val) {
-			nvm->ops.release_nvm(hw);
+			nvm->ops.release(hw);
 			goto out;
 		}
 	} else {
@@ -2022,7 +1850,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		new_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
 		if (ret_val) {
-			nvm->ops.release_nvm(hw);
+			nvm->ops.release(hw);
 			goto out;
 		}
 	}
@@ -2079,8 +1907,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	 */
 	if (ret_val) {
 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
-		hw_dbg(hw, "Flash commit failed.\n");
-		nvm->ops.release_nvm(hw);
+		e_dbg("Flash commit failed.\n");
+		nvm->ops.release(hw);
 		goto out;
 	}
 
@@ -2093,7 +1921,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
 	if (ret_val) {
-		nvm->ops.release_nvm(hw);
+		nvm->ops.release(hw);
 		goto out;
 	}
 	data &= 0xBFFF;
@@ -2101,7 +1929,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 						       act_offset * 2 + 1,
 						       (u8)(data >> 8));
 	if (ret_val) {
-		nvm->ops.release_nvm(hw);
+		nvm->ops.release(hw);
 		goto out;
 	}
 
@@ -2114,17 +1942,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
 	if (ret_val) {
-		nvm->ops.release_nvm(hw);
+		nvm->ops.release(hw);
 		goto out;
 	}
 
 	/* Great!  Everything worked, we can now clear the cached entries. */
 	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
-		dev_spec->shadow_ram[i].modified = 0;
+		dev_spec->shadow_ram[i].modified = false;
 		dev_spec->shadow_ram[i].value = 0xFFFF;
 	}
 
-	nvm->ops.release_nvm(hw);
+	nvm->ops.release(hw);
 
 	/*
 	 * Reload the EEPROM, or else modifications will not appear
@@ -2135,7 +1963,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 
 out:
 	if (ret_val)
-		hw_dbg(hw, "NVM update error: %d\n", ret_val);
+		e_dbg("NVM update error: %d\n", ret_val);
 
 	return ret_val;
 }
@@ -2193,7 +2021,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 	union ich8_hws_flash_status hsfsts;
 	u32 gfpreg;
 
-	nvm->ops.acquire_nvm(hw);
+	nvm->ops.acquire(hw);
 
 	gfpreg = er32flash(ICH_FLASH_GFPREG);
 
@@ -2214,7 +2042,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 	hsfsts.hsf_status.flockdn = true;
 	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-	nvm->ops.release_nvm(hw);
+	nvm->ops.release(hw);
 }
 
 /**
@@ -2285,7 +2113,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 			/* Repeat for some time before giving up. */
 			continue;
 		if (hsfsts.hsf_status.flcdone == 0) {
-			hw_dbg(hw, "Timeout error - flash cycle "
+			e_dbg("Timeout error - flash cycle "
 				 "did not complete.");
 			break;
 		}
@@ -2330,7 +2158,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 		return ret_val;
 
 	for (program_retries = 0; program_retries < 100; program_retries++) {
-		hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset);
+		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
 		udelay(100);
 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
 		if (!ret_val)
@@ -2360,9 +2188,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 	u32 flash_bank_size = nvm->flash_bank_size * 2;
 	s32 ret_val;
 	s32 count = 0;
-	s32 iteration;
-	s32 sector_size;
-	s32 j;
+	s32 j, iteration, sector_size;
 
 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
@@ -2465,7 +2291,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
 
 	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}
 
@@ -2595,10 +2421,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
 	if (ret_val) {
-		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+		e_dbg("PCI-E Master disable polling has failed.\n");
 	}
 
-	hw_dbg(hw, "Masking off all interrupts\n");
+	e_dbg("Masking off all interrupts\n");
 	ew32(IMC, 0xffffffff);
 
 	/*
@@ -2649,8 +2475,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 		ctrl |= E1000_CTRL_PHY_RST;
 	}
 	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	/* Whether or not the swflag was acquired, we need to reset the part */
-	hw_dbg(hw, "Issuing a global reset to ich8lan\n");
+	e_dbg("Issuing a global reset to ich8lan\n");
 	ew32(CTRL, (ctrl | E1000_CTRL_RST));
 	msleep(20);
 
@@ -2670,7 +2495,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 			 * return with an error. This can happen in situations
 			 * where there is no eeprom and prevents getting link.
 			 */
-			hw_dbg(hw, "Auto Read Done did not complete\n");
+			e_dbg("Auto Read Done did not complete\n");
 		}
 	}
 	/* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -2731,16 +2556,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 
 	/* Initialize identification LED */
 	ret_val = mac->ops.id_led_init(hw);
-	if (ret_val) {
-		hw_dbg(hw, "Error initializing identification LED\n");
-		return ret_val;
-	}
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
 
 	/* Setup the receive address. */
 	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
 
 	/* Zero out the Multicast HASH table */
-	hw_dbg(hw, "Zeroing the MTA\n");
+	e_dbg("Zeroing the MTA\n");
 	for (i = 0; i < mac->mta_reg_count; i++)
 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
 
@@ -2750,7 +2574,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_phy_reg(hw, BM_WUC, &i);
+		hw->phy.ops.read_reg(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -2886,7 +2710,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 	 */
 	hw->fc.current_mode = hw->fc.requested_mode;
 
-	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
+	e_dbg("After fix-ups FlowControl is now = %x\n",
 		hw->fc.current_mode);
 
 	/* Continue to configure the copper link. */
@@ -2897,7 +2721,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 	ew32(FCTTV, hw->fc.pause_time);
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		ret_val = hw->phy.ops.write_phy_reg(hw,
+		ret_val = hw->phy.ops.write_reg(hw,
 		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
 		                             hw->fc.pause_time);
 		if (ret_val)
@@ -2960,7 +2784,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
+		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
 		                               &reg_data);
 		if (ret_val)
 			return ret_val;
@@ -2979,7 +2803,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
+		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
 		                                reg_data);
 		if (ret_val)
 			return ret_val;
@@ -3092,8 +2916,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
  *  @hw: pointer to the HW structure
  *  @state: boolean value used to set the current Kumeran workaround state
  *
- *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
- *  /disabled - FALSE).
+ *  If ICH8, set the current Kumeran workaround state (enabled - true
+ *  /disabled - false).
  **/
 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 						 bool state)
@@ -3101,7 +2925,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 
 	if (hw->mac.type != e1000_ich8lan) {
-		hw_dbg(hw, "Workaround applies to ICH8 only.\n");
+		e_dbg("Workaround applies to ICH8 only.\n");
 		return;
 	}
 
@@ -3209,6 +3033,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
 	u32 phy_ctrl;
 
 	switch (hw->mac.type) {
+	case e1000_ich8lan:
 	case e1000_ich9lan:
 	case e1000_ich10lan:
 	case e1000_pchlan:
@@ -3281,7 +3106,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
+	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
 					(u16)hw->mac.ledctl_mode1);
 }
 
@@ -3293,7 +3118,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
  **/
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
+	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
 					(u16)hw->mac.ledctl_default);
 }
 
@@ -3325,7 +3150,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
+	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3356,7 +3181,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
+	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3379,8 +3204,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
 		if (status & E1000_STATUS_PHYRA)
 			ew32(STATUS, status & ~E1000_STATUS_PHYRA);
 		else
-			hw_dbg(hw,
-			       "PHY Reset Asserted not set - needs delay\n");
+			e_dbg("PHY Reset Asserted not set - needs delay\n");
 	}
 
 	e1000e_get_cfg_done(hw);
@@ -3395,7 +3219,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
 	} else {
 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
 			/* Maybe we should do a basic PHY config */
-			hw_dbg(hw, "EEPROM not present\n");
+			e_dbg("EEPROM not present\n");
 			return -E1000_ERR_CONFIG;
 		}
 	}
@@ -3404,6 +3228,23 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(hw->mac.ops.check_mng_mode(hw) ||
+	      hw->phy.ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
  *  @hw: pointer to the HW structure
  *
@@ -3412,42 +3253,41 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
  **/
 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
 {
-	u32 temp;
 	u16 phy_data;
 
 	e1000e_clear_hw_cntrs_base(hw);
 
-	temp = er32(ALGNERRC);
-	temp = er32(RXERRC);
-	temp = er32(TNCRS);
-	temp = er32(CEXTERR);
-	temp = er32(TSCTC);
-	temp = er32(TSCTFC);
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
 
-	temp = er32(MGTPRC);
-	temp = er32(MGTPDC);
-	temp = er32(MGTPTC);
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
 
-	temp = er32(IAC);
-	temp = er32(ICRXOC);
+	er32(IAC);
+	er32(ICRXOC);
 
 	/* Clear PHY statistics registers */
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data);
-		hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
+		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
 	}
 }
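
e1000_clear_hw_cntrs_ich8lan() is rewritten to call er32() and read_reg() purely for their side effect: the statistics registers clear on read, so the previously used temp variable was never needed and the return values can simply be discarded. A standalone sketch of that read-to-clear idiom, using a pretend counter bank instead of real MMIO:

    #include <stdio.h>

    /* Pretend clear-on-read hardware counter bank for the sketch. */
    static unsigned counters[4] = { 7, 0, 3, 12 };

    /*
     * Reading a counter returns its value and clears it, like the MAC
     * statistics registers the driver touches via er32().
     */
    static unsigned read_to_clear(unsigned idx)
    {
            unsigned v = counters[idx];

            counters[idx] = 0;
            return v;
    }

    int main(void)
    {
            /*
             * The value is not needed here - the read is done purely for its
             * clearing side effect, so the result is discarded instead of
             * being parked in an unused temporary.
             */
            for (unsigned i = 0; i < 4; i++)
                    (void)read_to_clear(i);

            printf("counter[0] after clear: %u\n", counters[0]);
            return 0;
    }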
 
@@ -3470,29 +3310,27 @@ static struct e1000_mac_operations ich8_mac_ops = {
 };
 
 static struct e1000_phy_operations ich8_phy_ops = {
-	.acquire_phy		= e1000_acquire_swflag_ich8lan,
+	.acquire		= e1000_acquire_swflag_ich8lan,
 	.check_reset_block	= e1000_check_reset_block_ich8lan,
-	.commit_phy		= NULL,
-	.force_speed_duplex	= e1000_phy_force_speed_duplex_ich8lan,
+	.commit			= NULL,
 	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
 	.get_cable_length	= e1000e_get_cable_length_igp_2,
-	.get_phy_info		= e1000_get_phy_info_ich8lan,
-	.read_phy_reg		= e1000e_read_phy_reg_igp,
-	.release_phy		= e1000_release_swflag_ich8lan,
-	.reset_phy		= e1000_phy_hw_reset_ich8lan,
+	.read_reg		= e1000e_read_phy_reg_igp,
+	.release		= e1000_release_swflag_ich8lan,
+	.reset			= e1000_phy_hw_reset_ich8lan,
 	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
 	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
-	.write_phy_reg		= e1000e_write_phy_reg_igp,
+	.write_reg		= e1000e_write_phy_reg_igp,
 };
 
 static struct e1000_nvm_operations ich8_nvm_ops = {
-	.acquire_nvm		= e1000_acquire_nvm_ich8lan,
-	.read_nvm	 	= e1000_read_nvm_ich8lan,
-	.release_nvm		= e1000_release_nvm_ich8lan,
-	.update_nvm		= e1000_update_nvm_checksum_ich8lan,
+	.acquire		= e1000_acquire_nvm_ich8lan,
+	.read		 	= e1000_read_nvm_ich8lan,
+	.release		= e1000_release_nvm_ich8lan,
+	.update			= e1000_update_nvm_checksum_ich8lan,
 	.valid_led_default	= e1000_valid_led_default_ich8lan,
-	.validate_nvm		= e1000_validate_nvm_checksum_ich8lan,
-	.write_nvm		= e1000_write_nvm_ich8lan,
+	.validate		= e1000_validate_nvm_checksum_ich8lan,
+	.write			= e1000_write_nvm_ich8lan,
 };
 
 struct e1000_info e1000_ich8_info = {
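
ich8_phy_ops and ich8_nvm_ops above are static tables built with designated initializers, so any member that is not named (for example get_info or check_polarity here) is implicitly NULL and is expected to be filled in later by the per-device init routines shown earlier in this file. A brief illustration of that default-NULL-plus-runtime-patching pattern, with names invented for the sketch:

    #include <stdio.h>

    struct phy_ops {
            int (*acquire)(void);
            int (*get_info)(void);  /* left NULL here, filled in at init time */
    };

    static int generic_acquire(void) { return 0; }
    static int info_82577(void)      { return 82577; }

    /* Designated initializers: every member not named is implicitly NULL. */
    static struct phy_ops default_phy_ops = {
            .acquire = generic_acquire,
    };

    int main(void)
    {
            struct phy_ops ops = default_phy_ops;

            printf("get_info set before init: %s\n", ops.get_info ? "yes" : "no");

            /* Device-specific init patches in the missing handler. */
            ops.get_info = info_82577;
            printf("get_info() = %d\n", ops.get_info());
            return 0;
    }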
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 99ba2b8a2a05..a86c17548c1e 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -26,11 +26,6 @@
 
 *******************************************************************************/
 
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"
 
 enum e1000_mng_mode {
@@ -87,7 +82,24 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
 }
 
 /**
- *  e1000e_write_vfta - Write value to VLAN filter table
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		e1e_flush();
+	}
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
  *  @hw: pointer to the HW structure
  *  @offset: register offset in VLAN filter table
  *  @value: register value written to VLAN filter table
@@ -95,7 +107,7 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
  *  Writes value at the given offset in the register array which stores
  *  the VLAN filter table.
  **/
-void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
 {
 	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
 	e1e_flush();
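
The new e1000_clear_vfta_generic() walks the whole VLAN filter table register array, writing zero to each entry and flushing after every write, while e1000_write_vfta_generic() performs the same write-plus-flush for a single entry. A standalone sketch of that loop, with a plain array standing in for the driver's E1000_WRITE_REG_ARRAY() and e1e_flush() macros:

    #include <stdio.h>

    #define VLAN_FILTER_TBL_SIZE 128

    /* Stand-ins for the driver's register-array write and flush macros. */
    static unsigned vfta_regs[VLAN_FILTER_TBL_SIZE];

    static void write_reg_array(unsigned offset, unsigned value)
    {
            vfta_regs[offset] = value;  /* an MMIO write in the real driver */
    }

    static void flush_write(void)
    {
            /* A readback of a status register would force posting here. */
    }

    /* Clear the whole VLAN filter table: one write (plus flush) per entry. */
    static void clear_vfta(void)
    {
            for (unsigned offset = 0; offset < VLAN_FILTER_TBL_SIZE; offset++) {
                    write_reg_array(offset, 0);
                    flush_write();
            }
    }

    int main(void)
    {
            vfta_regs[3] = 0xdeadbeef;
            clear_vfta();
            printf("entry 3 after clear: 0x%x\n", vfta_regs[3]);
            return 0;
    }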
@@ -115,12 +127,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
 	u32 i;
 
 	/* Setup the receive address */
-	hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
+	e_dbg("Programming MAC Address into RAR[0]\n");
 
 	e1000e_rar_set(hw, hw->mac.addr, 0);
 
 	/* Zero out the other (rar_entry_count - 1) receive addresses */
-	hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
+	e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
 	for (i = 1; i < rar_count; i++) {
 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
 		e1e_flush();
@@ -276,7 +288,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
 	for (; mc_addr_count > 0; mc_addr_count--) {
 		u32 hash_value, hash_reg, hash_bit, mta;
 		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
-		hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
+		e_dbg("Hash value = 0x%03X\n", hash_value);
 		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
 		hash_bit = hash_value & 0x1F;
 		mta = (1 << hash_bit);
@@ -300,45 +312,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
  **/
 void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
 {
-	u32 temp;
-
-	temp = er32(CRCERRS);
-	temp = er32(SYMERRS);
-	temp = er32(MPC);
-	temp = er32(SCC);
-	temp = er32(ECOL);
-	temp = er32(MCC);
-	temp = er32(LATECOL);
-	temp = er32(COLC);
-	temp = er32(DC);
-	temp = er32(SEC);
-	temp = er32(RLEC);
-	temp = er32(XONRXC);
-	temp = er32(XONTXC);
-	temp = er32(XOFFRXC);
-	temp = er32(XOFFTXC);
-	temp = er32(FCRUC);
-	temp = er32(GPRC);
-	temp = er32(BPRC);
-	temp = er32(MPRC);
-	temp = er32(GPTC);
-	temp = er32(GORCL);
-	temp = er32(GORCH);
-	temp = er32(GOTCL);
-	temp = er32(GOTCH);
-	temp = er32(RNBC);
-	temp = er32(RUC);
-	temp = er32(RFC);
-	temp = er32(ROC);
-	temp = er32(RJC);
-	temp = er32(TORL);
-	temp = er32(TORH);
-	temp = er32(TOTL);
-	temp = er32(TOTH);
-	temp = er32(TPR);
-	temp = er32(TPT);
-	temp = er32(MPTC);
-	temp = er32(BPTC);
+	er32(CRCERRS);
+	er32(SYMERRS);
+	er32(MPC);
+	er32(SCC);
+	er32(ECOL);
+	er32(MCC);
+	er32(LATECOL);
+	er32(COLC);
+	er32(DC);
+	er32(SEC);
+	er32(RLEC);
+	er32(XONRXC);
+	er32(XONTXC);
+	er32(XOFFRXC);
+	er32(XOFFTXC);
+	er32(FCRUC);
+	er32(GPRC);
+	er32(BPRC);
+	er32(MPRC);
+	er32(GPTC);
+	er32(GORCL);
+	er32(GORCH);
+	er32(GOTCL);
+	er32(GOTCH);
+	er32(RNBC);
+	er32(RUC);
+	er32(RFC);
+	er32(ROC);
+	er32(RJC);
+	er32(TORL);
+	er32(TORH);
+	er32(TOTL);
+	er32(TOTH);
+	er32(TPR);
+	er32(TPT);
+	er32(MPTC);
+	er32(BPTC);
 }
 
 /**
@@ -376,7 +386,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	if (!link)
 		return ret_val; /* No link detected */
 
-	mac->get_link_status = 0;
+	mac->get_link_status = false;
 
 	/*
 	 * Check if there was DownShift, must be checked
@@ -408,7 +418,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
 	if (ret_val) {
-		hw_dbg(hw, "Error configuring flow control\n");
+		e_dbg("Error configuring flow control\n");
 	}
 
 	return ret_val;
@@ -448,7 +458,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -461,7 +471,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
 		/* Configure Flow Control after forcing link up. */
 		ret_val = e1000e_config_fc_after_link_up(hw);
 		if (ret_val) {
-			hw_dbg(hw, "Error configuring flow control\n");
+			e_dbg("Error configuring flow control\n");
 			return ret_val;
 		}
 	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -471,7 +481,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -513,7 +523,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -526,7 +536,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 		/* Configure Flow Control after forcing link up. */
 		ret_val = e1000e_config_fc_after_link_up(hw);
 		if (ret_val) {
-			hw_dbg(hw, "Error configuring flow control\n");
+			e_dbg("Error configuring flow control\n");
 			return ret_val;
 		}
 	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -536,7 +546,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -553,11 +563,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 		if (rxcw & E1000_RXCW_SYNCH) {
 			if (!(rxcw & E1000_RXCW_IV)) {
 				mac->serdes_has_link = true;
-				hw_dbg(hw, "SERDES: Link up - forced.\n");
+				e_dbg("SERDES: Link up - forced.\n");
 			}
 		} else {
 			mac->serdes_has_link = false;
-			hw_dbg(hw, "SERDES: Link down - force failed.\n");
+			e_dbg("SERDES: Link down - force failed.\n");
 		}
 	}
 
@@ -570,20 +580,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 			if (rxcw & E1000_RXCW_SYNCH) {
 				if (!(rxcw & E1000_RXCW_IV)) {
 					mac->serdes_has_link = true;
-					hw_dbg(hw, "SERDES: Link up - autoneg "
+					e_dbg("SERDES: Link up - autoneg "
 					   "completed successfully.\n");
 				} else {
 					mac->serdes_has_link = false;
-					hw_dbg(hw, "SERDES: Link down - invalid"
+					e_dbg("SERDES: Link down - invalid "
 					   "codewords detected in autoneg.\n");
 				}
 			} else {
 				mac->serdes_has_link = false;
-				hw_dbg(hw, "SERDES: Link down - no sync.\n");
+				e_dbg("SERDES: Link down - no sync.\n");
 			}
 		} else {
 			mac->serdes_has_link = false;
-			hw_dbg(hw, "SERDES: Link down - autoneg failed\n");
+			e_dbg("SERDES: Link down - autoneg failed\n");
 		}
 	}
 
@@ -614,7 +624,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
 	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
 
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}
 
@@ -667,7 +677,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
 	 */
 	hw->fc.current_mode = hw->fc.requested_mode;
 
-	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
+	e_dbg("After fix-ups FlowControl is now = %x\n",
 		hw->fc.current_mode);
 
 	/* Call the necessary media_type subroutine to configure the link. */
@@ -681,7 +691,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
 	 * control is disabled, because it does not hurt anything to
 	 * initialize these registers.
 	 */
-	hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
+	e_dbg("Initializing the Flow Control address, type and timer regs\n");
 	ew32(FCT, FLOW_CONTROL_TYPE);
 	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
 	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
@@ -751,7 +761,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
 		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
 		break;
 	default:
-		hw_dbg(hw, "Flow control param set incorrectly\n");
+		e_dbg("Flow control param set incorrectly\n");
 		return -E1000_ERR_CONFIG;
 		break;
 	}
@@ -789,7 +799,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
 			break;
 	}
 	if (i == FIBER_LINK_UP_LIMIT) {
-		hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
+		e_dbg("Never got a valid link from auto-neg!!!\n");
 		mac->autoneg_failed = 1;
 		/*
 		 * AutoNeg failed to achieve a link, so we'll call
@@ -799,13 +809,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
 		 */
 		ret_val = mac->ops.check_for_link(hw);
 		if (ret_val) {
-			hw_dbg(hw, "Error while checking for link\n");
+			e_dbg("Error while checking for link\n");
 			return ret_val;
 		}
 		mac->autoneg_failed = 0;
 	} else {
 		mac->autoneg_failed = 0;
-		hw_dbg(hw, "Valid Link Found\n");
+		e_dbg("Valid Link Found\n");
 	}
 
 	return 0;
@@ -841,7 +851,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
 	 * then the link-up status bit will be set and the flow control enable
 	 * bits (RFCE and TFCE) will be set according to their negotiated value.
 	 */
-	hw_dbg(hw, "Auto-negotiation enabled\n");
+	e_dbg("Auto-negotiation enabled\n");
 
 	ew32(CTRL, ctrl);
 	e1e_flush();
@@ -856,7 +866,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
 	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
 		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
 	} else {
-		hw_dbg(hw, "No signal detected\n");
+		e_dbg("No signal detected\n");
 	}
 
 	return 0;
@@ -952,7 +962,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
 	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
 	 *  other:  No other values should be possible at this point.
 	 */
-	hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode);
+	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
 
 	switch (hw->fc.current_mode) {
 	case e1000_fc_none:
@@ -970,7 +980,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
 		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
 		break;
 	default:
-		hw_dbg(hw, "Flow control param set incorrectly\n");
+		e_dbg("Flow control param set incorrectly\n");
 		return -E1000_ERR_CONFIG;
 	}
 
@@ -1011,7 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 	}
 
 	if (ret_val) {
-		hw_dbg(hw, "Error forcing flow control settings\n");
+		e_dbg("Error forcing flow control settings\n");
 		return ret_val;
 	}
 
@@ -1035,7 +1045,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 			return ret_val;
 
 		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-			hw_dbg(hw, "Copper PHY and Auto Neg "
+			e_dbg("Copper PHY and Auto Neg "
 				 "has not completed.\n");
 			return ret_val;
 		}
@@ -1076,7 +1086,6 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 *   1   |    1    |   0   |    0    | e1000_fc_none
 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
 		 *
-		 *
 		 * Are both PAUSE bits set to 1?  If so, this implies
 		 * Symmetric Flow Control is enabled at both ends.  The
 		 * ASM_DIR bits are irrelevant per the spec.
@@ -1100,10 +1109,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 			 */
 			if (hw->fc.requested_mode == e1000_fc_full) {
 				hw->fc.current_mode = e1000_fc_full;
-				hw_dbg(hw, "Flow Control = FULL.\r\n");
+				e_dbg("Flow Control = FULL.\r\n");
 			} else {
 				hw->fc.current_mode = e1000_fc_rx_pause;
-				hw_dbg(hw, "Flow Control = "
+				e_dbg("Flow Control = "
 					 "RX PAUSE frames only.\r\n");
 			}
 		}
@@ -1114,14 +1123,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
 		 *-------|---------|-------|---------|--------------------
 		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
-		 *
 		 */
 		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
 			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
 			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 			hw->fc.current_mode = e1000_fc_tx_pause;
-			hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
+			e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
 		}
 		/*
 		 * For transmitting PAUSE frames ONLY.
@@ -1130,21 +1138,20 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
 		 *-------|---------|-------|---------|--------------------
 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
-		 *
 		 */
 		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
 			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
 			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
+			e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
 		} else {
 			/*
 			 * Per the IEEE spec, at this point flow control
 			 * should be disabled.
 			 */
 			hw->fc.current_mode = e1000_fc_none;
-			hw_dbg(hw, "Flow Control = NONE.\r\n");
+			e_dbg("Flow Control = NONE.\r\n");
 		}
 
 		/*
@@ -1154,7 +1161,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 */
 		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
 		if (ret_val) {
-			hw_dbg(hw, "Error getting link speed and duplex\n");
+			e_dbg("Error getting link speed and duplex\n");
 			return ret_val;
 		}
 
@@ -1167,7 +1174,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 */
 		ret_val = e1000e_force_mac_fc(hw);
 		if (ret_val) {
-			hw_dbg(hw, "Error forcing flow control settings\n");
+			e_dbg("Error forcing flow control settings\n");
 			return ret_val;
 		}
 	}
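
The hunks above only swap the debug macro, but the code they pass through is the flow-control resolution spelled out in the comment tables: if both sides advertise PAUSE the result is full or Rx-only pause depending on what was requested, a local ASM_DIR-only advertisement against a partner advertising PAUSE plus ASM_DIR gives Tx pause, the mirrored combination gives Rx pause, and everything else disables flow control. The function below restates that table in isolation; the enum and parameter names are invented for the sketch and are not the driver's types.

    #include <stdbool.h>
    #include <stdio.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /*
     * Resolve flow control from the local and link-partner autoneg bits,
     * following the PAUSE/ASM_DIR table in the driver comments.
     */
    static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                                   bool lp_pause, bool lp_asm,
                                   enum fc_mode requested)
    {
            if (loc_pause && lp_pause)
                    return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;

            if (!loc_pause && loc_asm && lp_pause && lp_asm)
                    return FC_TX_PAUSE;   /* Tx PAUSE frames only */

            if (loc_pause && loc_asm && !lp_pause && lp_asm)
                    return FC_RX_PAUSE;   /* Rx PAUSE frames only */

            return FC_NONE;
    }

    int main(void)
    {
            printf("%d\n", resolve_fc(true, true, true, false, FC_FULL)); /* 3 = FULL */
            printf("%d\n", resolve_fc(false, true, true, true, FC_FULL)); /* 2 = TX_PAUSE */
            printf("%d\n", resolve_fc(true, true, false, true, FC_NONE)); /* 1 = RX_PAUSE */
            return 0;
    }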
@@ -1191,21 +1198,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
 	status = er32(STATUS);
 	if (status & E1000_STATUS_SPEED_1000) {
 		*speed = SPEED_1000;
-		hw_dbg(hw, "1000 Mbs, ");
+		e_dbg("1000 Mbs, ");
 	} else if (status & E1000_STATUS_SPEED_100) {
 		*speed = SPEED_100;
-		hw_dbg(hw, "100 Mbs, ");
+		e_dbg("100 Mbs, ");
 	} else {
 		*speed = SPEED_10;
-		hw_dbg(hw, "10 Mbs, ");
+		e_dbg("10 Mbs, ");
 	}
 
 	if (status & E1000_STATUS_FD) {
 		*duplex = FULL_DUPLEX;
-		hw_dbg(hw, "Full Duplex\n");
+		e_dbg("Full Duplex\n");
 	} else {
 		*duplex = HALF_DUPLEX;
-		hw_dbg(hw, "Half Duplex\n");
+		e_dbg("Half Duplex\n");
 	}
 
 	return 0;
@@ -1251,7 +1258,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
 	}
 
 	if (i == timeout) {
-		hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -1270,7 +1277,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
 	if (i == timeout) {
 		/* Release semaphores */
 		e1000e_put_hw_semaphore(hw);
-		hw_dbg(hw, "Driver can't access the NVM\n");
+		e_dbg("Driver can't access the NVM\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -1310,7 +1317,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
 	}
 
 	if (i == AUTO_READ_DONE_TIMEOUT) {
-		hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
+		e_dbg("Auto read by HW from NVM has not completed.\n");
 		return -E1000_ERR_RESET;
 	}
 
@@ -1331,7 +1338,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
 
 	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}
 
@@ -1585,7 +1592,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
 	}
 
 	if (!timeout) {
-		hw_dbg(hw, "Master requests are pending.\n");
+		e_dbg("Master requests are pending.\n");
 		return -E1000_ERR_MASTER_REQUESTS_PENDING;
 	}
 
@@ -1608,7 +1615,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
 	mac->ifs_step_size = IFS_STEP;
 	mac->ifs_ratio = IFS_RATIO;
 
-	mac->in_ifs_mode = 0;
+	mac->in_ifs_mode = false;
 	ew32(AIT, 0);
 }
 
@@ -1625,7 +1632,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
 
 	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
 		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
-			mac->in_ifs_mode = 1;
+			mac->in_ifs_mode = true;
 			if (mac->current_ifs_val < mac->ifs_max_val) {
 				if (!mac->current_ifs_val)
 					mac->current_ifs_val = mac->ifs_min_val;
@@ -1639,7 +1646,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
 		if (mac->in_ifs_mode &&
 		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
 			mac->current_ifs_val = 0;
-			mac->in_ifs_mode = 0;
+			mac->in_ifs_mode = false;
 			ew32(AIT, 0);
 		}
 	}
@@ -1809,7 +1816,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw)
 	if (!timeout) {
 		eecd &= ~E1000_EECD_REQ;
 		ew32(EECD, eecd);
-		hw_dbg(hw, "Could not acquire NVM grant\n");
+		e_dbg("Could not acquire NVM grant\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -1914,7 +1921,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 		}
 
 		if (!timeout) {
-			hw_dbg(hw, "SPI NVM Status error\n");
+			e_dbg("SPI NVM Status error\n");
 			return -E1000_ERR_NVM;
 		}
 	}
@@ -1943,7 +1950,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+		e_dbg("nvm parameter(s) out of bounds\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -1986,11 +1993,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+		e_dbg("nvm parameter(s) out of bounds\n");
 		return -E1000_ERR_NVM;
 	}
 
-	ret_val = nvm->ops.acquire_nvm(hw);
+	ret_val = nvm->ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -2001,7 +2008,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 
 		ret_val = e1000_ready_nvm_eeprom(hw);
 		if (ret_val) {
-			nvm->ops.release_nvm(hw);
+			nvm->ops.release(hw);
 			return ret_val;
 		}
 
@@ -2040,7 +2047,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	}
 
 	msleep(10);
-	nvm->ops.release_nvm(hw);
+	nvm->ops.release(hw);
 	return 0;
 }
 
@@ -2066,7 +2073,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 		ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
 					 &mac_addr_offset);
 		if (ret_val) {
-			hw_dbg(hw, "NVM Read Error\n");
+			e_dbg("NVM Read Error\n");
 			return ret_val;
 		}
 		if (mac_addr_offset == 0xFFFF)
@@ -2081,7 +2088,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 			ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
 						 &nvm_data);
 			if (ret_val) {
-				hw_dbg(hw, "NVM Read Error\n");
+				e_dbg("NVM Read Error\n");
 				return ret_val;
 			}
 			if (nvm_data & 0x0001)
@@ -2096,7 +2103,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 		offset = mac_addr_offset + (i >> 1);
 		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
 		if (ret_val) {
-			hw_dbg(hw, "NVM Read Error\n");
+			e_dbg("NVM Read Error\n");
 			return ret_val;
 		}
 		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
@@ -2129,14 +2136,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
 	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
 		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
 		if (ret_val) {
-			hw_dbg(hw, "NVM Read Error\n");
+			e_dbg("NVM Read Error\n");
 			return ret_val;
 		}
 		checksum += nvm_data;
 	}
 
 	if (checksum != (u16) NVM_SUM) {
-		hw_dbg(hw, "NVM Checksum Invalid\n");
+		e_dbg("NVM Checksum Invalid\n");
 		return -E1000_ERR_NVM;
 	}
 
@@ -2160,7 +2167,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
 		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
 		if (ret_val) {
-			hw_dbg(hw, "NVM Read Error while updating checksum.\n");
+			e_dbg("NVM Read Error while updating checksum.\n");
 			return ret_val;
 		}
 		checksum += nvm_data;
@@ -2168,7 +2175,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
 	checksum = (u16) NVM_SUM - checksum;
 	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
 	if (ret_val)
-		hw_dbg(hw, "NVM Write Error while updating checksum.\n");
+		e_dbg("NVM Write Error while updating checksum.\n");
 
 	return ret_val;
 }
@@ -2231,7 +2238,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
 	/* Check that the host interface is enabled. */
 	hicr = er32(HICR);
 	if ((hicr & E1000_HICR_EN) == 0) {
-		hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
+		e_dbg("E1000_HOST_EN bit disabled.\n");
 		return -E1000_ERR_HOST_INTERFACE_COMMAND;
 	}
 	/* check the previous command is completed */
@@ -2243,7 +2250,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
 	}
 
 	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
-		hw_dbg(hw, "Previous command timeout failed .\n");
+		e_dbg("Previous command timed out.\n");
 		return -E1000_ERR_HOST_INTERFACE_COMMAND;
 	}
 
@@ -2282,7 +2289,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 
 	/* No manageability, no filtering */
 	if (!e1000e_check_mng_mode(hw)) {
-		hw->mac.tx_pkt_filtering = 0;
+		hw->mac.tx_pkt_filtering = false;
 		return 0;
 	}
 
@@ -2292,7 +2299,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 	 */
 	ret_val = e1000_mng_enable_host_if(hw);
 	if (ret_val != 0) {
-		hw->mac.tx_pkt_filtering = 0;
+		hw->mac.tx_pkt_filtering = false;
 		return ret_val;
 	}
 
@@ -2311,17 +2318,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 	 * take the safe route of assuming Tx filtering is enabled.
 	 */
 	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
-		hw->mac.tx_pkt_filtering = 1;
+		hw->mac.tx_pkt_filtering = true;
 		return 1;
 	}
 
 	/* Cookie area is valid, make the final check for filtering. */
 	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
-		hw->mac.tx_pkt_filtering = 0;
+		hw->mac.tx_pkt_filtering = false;
 		return 0;
 	}
 
-	hw->mac.tx_pkt_filtering = 1;
+	hw->mac.tx_pkt_filtering = true;
 	return 1;
 }
 
@@ -2353,7 +2360,7 @@ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
 }
 
 /**
- *  e1000_mng_host_if_write - Writes to the manageability host interface
+ *  e1000_mng_host_if_write - Write to the manageability host interface
  *  @hw: pointer to the HW structure
  *  @buffer: pointer to the host interface buffer
  *  @length: size of the buffer
@@ -2478,7 +2485,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
 {
 	u32 manc;
 	u32 fwsm, factps;
-	bool ret_val = 0;
+	bool ret_val = false;
 
 	manc = er32(MANC);
 
@@ -2493,13 +2500,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
 		if (!(factps & E1000_FACTPS_MNGCG) &&
 		    ((fwsm & E1000_FWSM_MODE_MASK) ==
 		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
-			ret_val = 1;
+			ret_val = true;
 			return ret_val;
 		}
 	} else {
 		if ((manc & E1000_MANC_SMBUS_EN) &&
 		    !(manc & E1000_MANC_ASF_EN)) {
-			ret_val = 1;
+			ret_val = true;
 			return ret_val;
 		}
 	}
@@ -2514,14 +2521,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
 
 	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}
 	*pba_num = (u32)(nvm_data << 16);
 
 	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}
 	*pba_num |= nvm_data;
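
The lib.c hunks above are dominated by one mechanical conversion: hw_dbg(hw, ...) becomes e_dbg(...), dropping the explicit hw argument. As a hedged illustration only (the driver's real macro lives in drivers/net/e1000e/e1000.h and is not part of this diff), such a macro can pick the hw pointer up from the caller's scope, which is also why several netdev.c hunks below add a local "struct e1000_hw *hw = &adapter->hw;" before using e_dbg():

	/*
	 * Illustrative sketch, not the macro actually added to e1000.h:
	 * 'hw' is resolved in the caller's scope, so call sites no longer
	 * pass it explicitly.
	 */
	#define e_dbg(format, arg...) \
		printk(KERN_DEBUG "%s: " format, \
		       hw->adapter->netdev->name, ##arg)
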
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fad8f9ea0043..c3105c5087e0 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -65,17 +65,6 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pchlan]		= &e1000_pch_info,
 };
 
-#ifdef DEBUG
-/**
- * e1000_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
-{
-	return hw->adapter->netdev->name;
-}
-#endif
-
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
  **/
@@ -167,7 +156,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +168,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (!skb) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 map_skb:
 		buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +266,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 			     cpu_to_le64(ps_page->dma);
 		}
 
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev,
+						adapter->rx_ps_bsize0);
 
 		if (!skb) {
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_ps_bsize0,
@@ -359,9 +334,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-	                     16 /* for skb_reserve */ -
-	                     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +346,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 check_page:
 		/* allocate a new page if necessary */
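
The three Rx allocation hunks replace the open-coded netdev_alloc_skb() followed by skb_reserve(skb, NET_IP_ALIGN) with a single netdev_alloc_skb_ip_align() call. Judging from the code being removed, the helper is presumably equivalent to the old pattern, roughly (sketch, not the exact skbuff.h definition):

	static inline struct sk_buff *
	netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

		/* shift skb->data so the IP header is 16-byte aligned once the
		 * 14-byte Ethernet header has been pulled */
		if (NET_IP_ALIGN && skb)
			skb_reserve(skb, NET_IP_ALIGN);
		return skb;
	}
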
@@ -437,6 +404,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	struct e1000_rx_desc *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
@@ -486,8 +454,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * packet, also make sure the frame isn't just CRC only */
 		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
 			/* All receives must fit into a single buffer */
-			e_dbg("%s: Receive packet consumed multiple buffers\n",
-			      netdev->name);
+			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
 			goto next_desc;
@@ -513,9 +480,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -560,33 +526,52 @@ next_desc:
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
 			     struct e1000_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev, buffer_info->dma,
+				       buffer_info->length, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev,	buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
 }
 
-static void e1000_print_tx_hang(struct e1000_adapter *adapter)
+static void e1000_print_hw_hang(struct work_struct *work)
 {
+	struct e1000_adapter *adapter = container_of(work,
+	                                             struct e1000_adapter,
+	                                             print_hang_task);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int i = tx_ring->next_to_clean;
 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
 	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_status, phy_1000t_status, phy_ext_status;
+	u16 pci_status;
 
-	/* detected Tx unit hang */
-	e_err("Detected Tx Unit Hang:\n"
+	e1e_rphy(hw, PHY_STATUS, &phy_status);
+	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+
+	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+	/* detected Hardware unit hang */
+	e_err("Detected Hardware Unit Hang:\n"
 	      "  TDH                  <%x>\n"
 	      "  TDT                  <%x>\n"
 	      "  next_to_use          <%x>\n"
@@ -595,7 +580,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
 	      "  time_stamp           <%lx>\n"
 	      "  next_to_watch        <%x>\n"
 	      "  jiffies              <%lx>\n"
-	      "  next_to_watch.status <%x>\n",
+	      "  next_to_watch.status <%x>\n"
+	      "MAC Status             <%x>\n"
+	      "PHY Status             <%x>\n"
+	      "PHY 1000BASE-T Status  <%x>\n"
+	      "PHY Extended Status    <%x>\n"
+	      "PCI Status             <%x>\n",
 	      readl(adapter->hw.hw_addr + tx_ring->head),
 	      readl(adapter->hw.hw_addr + tx_ring->tail),
 	      tx_ring->next_to_use,
@@ -603,7 +593,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
 	      tx_ring->buffer_info[eop].time_stamp,
 	      eop,
 	      jiffies,
-	      eop_desc->upper.fields.status);
+	      eop_desc->upper.fields.status,
+	      er32(STATUS),
+	      phy_status,
+	      phy_1000t_status,
+	      phy_ext_status,
+	      pci_status);
 }
 
 /**
@@ -677,21 +672,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	}
 
 	if (adapter->detect_tx_hung) {
-		/* Detect a transmit hang in hardware, this serializes the
-		 * check with the clearing of time_stamp and movement of i */
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
 		adapter->detect_tx_hung = 0;
 		if (tx_ring->buffer_info[i].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
-			       + (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-			e1000_print_tx_hang(adapter);
+			       + (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+			schedule_work(&adapter->print_hang_task);
 			netif_stop_queue(netdev);
 		}
 	}
 	adapter->total_tx_bytes += total_tx_bytes;
 	adapter->total_tx_packets += total_tx_packets;
-	adapter->net_stats.tx_bytes += total_tx_bytes;
-	adapter->net_stats.tx_packets += total_tx_packets;
+	netdev->stats.tx_bytes += total_tx_bytes;
+	netdev->stats.tx_packets += total_tx_packets;
 	return (count < tx_ring->count);
 }
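
Note that the Tx-hang report is no longer printed directly from the cleanup path: e1000_clean_tx_irq() only schedules print_hang_task, and the renamed e1000_print_hw_hang() handler additionally reads PHY and PCI config registers, which is better done outside the interrupt-driven path. The three pieces of the pattern, with the names this patch uses (fragments, not a complete file):

	/* at probe time (see the INIT_WORK addition further down) */
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* from the Tx cleanup path: defer the report */
	schedule_work(&adapter->print_hang_task);

	/* the handler recovers its adapter via container_of() */
	static void e1000_print_hw_hang(struct work_struct *work)
	{
		struct e1000_adapter *adapter =
			container_of(work, struct e1000_adapter, print_hang_task);

		/* ... dump MAC, PHY and PCI status registers ... */
	}
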
 
@@ -705,6 +702,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				  int *work_done, int work_to_do)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -748,8 +746,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		buffer_info->dma = 0;
 
 		if (!(staterr & E1000_RXD_STAT_EOP)) {
-			e_dbg("%s: Packet Split buffers didn't pick up the "
-			      "full packet\n", netdev->name);
+			e_dbg("Packet Split buffers didn't pick up the full "
+			      "packet\n");
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
@@ -762,8 +760,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		length = le16_to_cpu(rx_desc->wb.middle.length0);
 
 		if (!length) {
-			e_dbg("%s: Last part of the packet spanning multiple "
-			      "descriptors\n", netdev->name);
+			e_dbg("Last part of the packet spanning multiple "
+			      "descriptors\n");
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
@@ -871,8 +869,8 @@ next_desc:
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -1051,8 +1049,8 @@ next_desc:
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -1199,7 +1197,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, icr = er32(ICR);
 
-	if (!icr)
+	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
 		return IRQ_NONE;  /* Not our interrupt */
 
 	/*
@@ -1481,7 +1479,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
 	else
 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
 			  netdev);
 	if (err)
 		goto out;
@@ -1494,7 +1492,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
 	else
 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
 			  netdev);
 	if (err)
 		goto out;
@@ -1503,7 +1501,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
 	vector++;
 
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &e1000_msix_other, 0, netdev->name, netdev);
+			  e1000_msix_other, 0, netdev->name, netdev);
 	if (err)
 		goto out;
 
@@ -1534,7 +1532,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 		e1000e_set_interrupt_capability(adapter);
 	}
 	if (adapter->flags & FLAG_MSI_ENABLED) {
-		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
+		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
 				  netdev->name, netdev);
 		if (!err)
 			return err;
@@ -1544,7 +1542,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
 	}
 
-	err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
+	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
 			  netdev->name, netdev);
 	if (err)
 		e_err("Unable to allocate interrupt, Error: %d\n", err);
@@ -2040,11 +2038,14 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id))
 		return;
+
 	/* add VID to filter table */
-	index = (vid >> 5) & 0x7F;
-	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-	vfta |= (1 << (vid & 0x1F));
-	e1000e_write_vfta(hw, index, vfta);
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta |= (1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
 }
 
 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2069,10 +2070,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	}
 
 	/* remove VID from filter table */
-	index = (vid >> 5) & 0x7F;
-	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-	vfta &= ~(1 << (vid & 0x1F));
-	e1000e_write_vfta(hw, index, vfta);
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta &= ~(1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
 }
 
 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
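
Both VLAN paths now touch the filter table only when FLAG_HAS_HW_VLAN_FILTER is set, and go through the hw->mac.ops.write_vfta() pointer instead of calling e1000e_write_vfta() directly; the index arithmetic itself is unchanged. The VFTA is an array of 128 32-bit registers, so bits 11:5 of the VID select the register and bits 4:0 select the bit inside it. A small standalone example with a hypothetical VID of 100:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vid = 100;                 /* example VLAN ID */
		unsigned int index = (vid >> 5) & 0x7F; /* which of 128 VFTA registers */
		unsigned int bit   = vid & 0x1F;        /* which bit inside that register */

		/* vid 100 -> VFTA[3] |= 0x00000010 */
		printf("VFTA[%u] |= 0x%08X\n", index, 1u << bit);
		return 0;
	}
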
@@ -2464,8 +2467,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		ew32(ITR, 1000000000 / (adapter->itr * 256));
 
 	ctrl_ext = er32(CTRL_EXT);
-	/* Reset delay timers after every interrupt */
-	ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 	/* Auto-Mask interrupts upon ICR access */
 	ctrl_ext |= E1000_CTRL_EXT_IAME;
 	ew32(IAM, 0xffffffff);
@@ -2507,21 +2508,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	 * packet size is equal or larger than the specified value (in 8 byte
 	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
 	 */
-	if ((adapter->flags & FLAG_HAS_ERT) &&
-	    (adapter->netdev->mtu > ETH_DATA_LEN)) {
-		u32 rxdctl = er32(RXDCTL(0));
-		ew32(RXDCTL(0), rxdctl | 0x3);
-		ew32(ERT, E1000_ERT_2048 | (1 << 13));
-		/*
-		 * With jumbo frames and early-receive enabled, excessive
-		 * C4->C2 latencies result in dropped transactions.
-		 */
-		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  e1000e_driver_name, 55);
-	} else {
-		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  e1000e_driver_name,
-					  PM_QOS_DEFAULT_VALUE);
+	if (adapter->flags & FLAG_HAS_ERT) {
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			u32 rxdctl = er32(RXDCTL(0));
+			ew32(RXDCTL(0), rxdctl | 0x3);
+			ew32(ERT, E1000_ERT_2048 | (1 << 13));
+			/*
+			 * With jumbo frames and early-receive enabled,
+			 * excessive C-state transition latencies result in
+			 * dropped transactions.
+			 */
+			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+						  adapter->netdev->name, 55);
+		} else {
+			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+						  adapter->netdev->name,
+						  PM_QOS_DEFAULT_VALUE);
+		}
 	}
 
 	/* Enable Receives */
@@ -2645,18 +2648,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
  **/
 void e1000e_power_up_phy(struct e1000_adapter *adapter)
 {
-	u16 mii_reg = 0;
-
-	/* Just clear the power down bit to wake the phy back up */
-	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
-		/*
-		 * According to the manual, the phy will retain its
-		 * settings across a power-down/up cycle
-		 */
-		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
-		mii_reg &= ~MII_CR_POWER_DOWN;
-		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
-	}
+	if (adapter->hw.phy.ops.power_up)
+		adapter->hw.phy.ops.power_up(&adapter->hw);
 
 	adapter->hw.mac.ops.setup_link(&adapter->hw);
 }
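
e1000e_power_up_phy() and e1000_power_down_phy() no longer toggle MII_CR_POWER_DOWN inline; they defer to per-PHY ops when those are populated. Going by the code removed here, a copper power-up implementation behind phy.ops.power_up presumably still reduces to clearing that bit (sketch, helper name assumed):

	/* hypothetical generic copper op installed as phy->ops.power_up */
	static void e1000_power_up_phy_copper(struct e1000_hw *hw)
	{
		u16 mii_reg = 0;

		/* The PHY retains its settings across a power-down/up cycle,
		 * so clearing the power-down bit is enough to wake it. */
		e1e_rphy(hw, PHY_CONTROL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1e_wphy(hw, PHY_CONTROL, mii_reg);
	}
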
@@ -2664,35 +2657,17 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
 /**
  * e1000_power_down_phy - Power down the PHY
  *
- * Power down the PHY so no link is implied when interface is down
- * The PHY cannot be powered down is management or WoL is active
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
  */
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
-	u16 mii_reg;
-
 	/* WoL is enabled */
 	if (adapter->wol)
 		return;
 
-	/* non-copper PHY? */
-	if (adapter->hw.phy.media_type != e1000_media_type_copper)
-		return;
-
-	/* reset is blocked because of a SoL/IDER session */
-	if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
-		return;
-
-	/* manageability (AMT) is enabled */
-	if (er32(MANC) & E1000_MANC_SMBUS_EN)
-		return;
-
-	/* power down the PHY */
-	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
-	mii_reg |= MII_CR_POWER_DOWN;
-	e1e_wphy(hw, PHY_CONTROL, mii_reg);
-	mdelay(1);
+	if (adapter->hw.phy.ops.power_down)
+		adapter->hw.phy.ops.power_down(&adapter->hw);
 }
 
 /**
@@ -2856,6 +2831,12 @@ int e1000e_up(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
+	/* DMA latency requirement to workaround early-receive/jumbo issue */
+	if (adapter->flags & FLAG_HAS_ERT)
+		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
+		                       adapter->netdev->name,
+				       PM_QOS_DEFAULT_VALUE);
+
 	/* hardware has been reset, we need to reload some things */
 	e1000_configure(adapter);
 
@@ -2916,6 +2897,10 @@ void e1000e_down(struct e1000_adapter *adapter)
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
+	if (adapter->flags & FLAG_HAS_ERT)
+		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
+		                          adapter->netdev->name);
+
 	/*
 	 * TODO: for power management, we could drop the link and
 	 * pci_disable_device here.
@@ -2973,7 +2958,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	e_dbg("%s: icr is %08X\n", netdev->name, icr);
+	e_dbg("icr is %08X\n", icr);
 	if (icr & E1000_ICR_RXSEQ) {
 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
 		wmb();
@@ -3010,7 +2995,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 	if (err)
 		goto msi_test_failed;
 
-	err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
 			  netdev->name, netdev);
 	if (err) {
 		pci_disable_msi(adapter->pdev);
@@ -3043,7 +3028,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 		goto msi_test_failed;
 
 	/* okay so the test worked, restore settings */
-	e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
+	e_dbg("MSI interrupt test succeeded!\n");
 msi_test_failed:
 	e1000e_set_interrupt_capability(adapter);
 	e1000_request_irq(adapter);
@@ -3304,6 +3289,7 @@ static void e1000_update_phy_info(unsigned long data)
  **/
 void e1000e_update_stats(struct e1000_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u16 phy_data;
@@ -3398,8 +3384,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.tsctfc += er32(TSCTFC);
 
 	/* Fill out the OS statistics structure */
-	adapter->net_stats.multicast = adapter->stats.mprc;
-	adapter->net_stats.collisions = adapter->stats.colc;
+	netdev->stats.multicast = adapter->stats.mprc;
+	netdev->stats.collisions = adapter->stats.colc;
 
 	/* Rx Errors */
 
@@ -3407,22 +3393,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	 * RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC
 	 */
-	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+	netdev->stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
 		adapter->stats.cexterr;
-	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
+	netdev->stats.rx_length_errors = adapter->stats.ruc +
 					      adapter->stats.roc;
-	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+	netdev->stats.rx_missed_errors = adapter->stats.mpc;
 
 	/* Tx Errors */
-	adapter->net_stats.tx_errors = adapter->stats.ecol +
+	netdev->stats.tx_errors = adapter->stats.ecol +
 				       adapter->stats.latecol;
-	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+	netdev->stats.tx_window_errors = adapter->stats.latecol;
+	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
@@ -3776,68 +3762,64 @@ static int e1000_tso(struct e1000_adapter *adapter,
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
 	int err;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
+	if (!skb_is_gso(skb))
+		return 0;
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		mss = skb_shinfo(skb)->gso_size;
-		if (skb->protocol == htons(ETH_P_IP)) {
-			struct iphdr *iph = ip_hdr(skb);
-			iph->tot_len = 0;
-			iph->check = 0;
-			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
-			cmd_length = E1000_TXD_CMD_IP;
-			ipcse = skb_transport_offset(skb) - 1;
-		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
-			ipv6_hdr(skb)->payload_len = 0;
-			tcp_hdr(skb)->check =
-				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-						 &ipv6_hdr(skb)->daddr,
-						 0, IPPROTO_TCP, 0);
-			ipcse = 0;
-		}
-		ipcss = skb_network_offset(skb);
-		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
-		tucss = skb_transport_offset(skb);
-		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-		tucse = 0;
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
 
-		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
-			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	mss = skb_shinfo(skb)->gso_size;
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+		                                         0, IPPROTO_TCP, 0);
+		cmd_length = E1000_TXD_CMD_IP;
+		ipcse = skb_transport_offset(skb) - 1;
+	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+		                                       &ipv6_hdr(skb)->daddr,
+		                                       0, IPPROTO_TCP, 0);
+		ipcse = 0;
+	}
+	ipcss = skb_network_offset(skb);
+	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+	tucss = skb_transport_offset(skb);
+	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+	tucse = 0;
 
-		i = tx_ring->next_to_use;
-		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
-		buffer_info = &tx_ring->buffer_info[i];
+	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+	               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
-		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
-		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
-		context_desc->upper_setup.tcp_fields.tucss = tucss;
-		context_desc->upper_setup.tcp_fields.tucso = tucso;
-		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
-		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
-		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
-		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+	i = tx_ring->next_to_use;
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+	buffer_info = &tx_ring->buffer_info[i];
 
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
+	context_desc->lower_setup.ip_fields.ipcss  = ipcss;
+	context_desc->lower_setup.ip_fields.ipcso  = ipcso;
+	context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+	context_desc->upper_setup.tcp_fields.tucss = tucss;
+	context_desc->upper_setup.tcp_fields.tucso = tucso;
+	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+	context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
 
-		return 1;
-	}
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+	tx_ring->next_to_use = i;
 
-	return 0;
+	return 1;
 }
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
@@ -3909,23 +3891,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			unsigned int mss)
 {
 	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb_headlen(skb);
-	unsigned int offset, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		adapter->tx_dma_failed++;
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-	offset = 0;
-
 	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, max_per_txd);
@@ -3933,11 +3906,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		buffer_info->length = size;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
-		count++;
+		buffer_info->dma = pci_map_single(pdev,	skb->data + offset,
+						  size,	PCI_DMA_TODEVICE);
+		buffer_info->mapped_as_page = false;
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
 
 		len -= size;
 		offset += size;
+		count++;
 
 		if (len) {
 			i++;
@@ -3951,7 +3928,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = 0;
+		offset = frag->page_offset;
 
 		while (len) {
 			i++;
@@ -3964,7 +3941,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->next_to_watch = i;
-			buffer_info->dma = map[f] + offset;
+			buffer_info->dma = pci_map_page(pdev, frag->page,
+							offset, size,
+							PCI_DMA_TODEVICE);
+			buffer_info->mapped_as_page = true;
+			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+				goto dma_error;
 
 			len -= size;
 			offset += size;
@@ -3976,6 +3958,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+	buffer_info->dma = 0;
+	count--;
+
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_put_txbuf(adapter, buffer_info);
+	}
+
+	return 0;
 }
 
 static void e1000_tx_queue(struct e1000_adapter *adapter,
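
e1000_tx_map() drops skb_dma_map()/skb_dma_unmap() and maps the head and each fragment itself with pci_map_single()/pci_map_page(), checking pci_dma_mapping_error() after every call; on failure the dma_error path walks back over the descriptors already filled and releases them through e1000_put_txbuf(). The acquire-then-unwind shape of that error path, reduced to standalone C with malloc()/free() standing in for the mapping calls (hypothetical names):

	#include <stdio.h>
	#include <stdlib.h>

	/* Acquire n resources; on failure release the ones already acquired. */
	static int map_all(void *res[], int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			res[i] = malloc(64);	/* stands in for pci_map_*() */
			if (!res[i])
				goto unwind;	/* plays the role of dma_error: */
		}
		return n;

	unwind:
		while (--i >= 0) {		/* walk back over what succeeded */
			free(res[i]);
			res[i] = NULL;
		}
		return 0;
	}

	int main(void)
	{
		void *res[4] = { NULL };
		int got = map_all(res, 4);

		printf("mapped %d of 4\n", got);
		while (--got >= 0)
			free(res[got]);
		return 0;
	}
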
@@ -4048,8 +4046,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
 	u16 length, offset;
 
 	if (vlan_tx_tag_present(skb)) {
-		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
-		    && (adapter->hw.mng_cookie.status &
+		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+		    (adapter->hw.mng_cookie.status &
 			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
 			return 0;
 	}
@@ -4271,10 +4269,8 @@ static void e1000_reset_task(struct work_struct *work)
  **/
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-
 	/* only return the current stats */
-	return &adapter->net_stats;
+	return &netdev->stats;
 }
 
 /**
@@ -4362,6 +4358,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 		data->phy_id = adapter->hw.phy.addr;
 		break;
 	case SIOCGMIIREG:
+		e1000_phy_read_status(adapter);
+
 		switch (data->reg_num & 0x1F) {
 		case MII_BMCR:
 			data->val_out = adapter->phy_regs.bmcr;
@@ -4469,7 +4467,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
 
 	/* activate PHY wakeup */
-	retval = hw->phy.ops.acquire_phy(hw);
+	retval = hw->phy.ops.acquire(hw);
 	if (retval) {
 		e_err("Could not acquire PHY\n");
 		return retval;
@@ -4486,7 +4484,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 	if (retval)
 		e_err("Could not set PHY Host Wakeup bit\n");
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 
 	return retval;
 }
@@ -5160,6 +5158,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
 	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
 	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
 
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
@@ -5283,19 +5282,24 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+	cancel_work_sync(&adapter->downshift_task);
+	cancel_work_sync(&adapter->update_phy_task);
+	cancel_work_sync(&adapter->print_hang_task);
 	flush_scheduled_work();
 
+	if (!(netdev->flags & IFF_UP))
+		e1000_power_down_phy(adapter);
+
+	unregister_netdev(netdev);
+
 	/*
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
 	e1000_release_hw_control(adapter);
 
-	unregister_netdev(netdev);
-
-	if (!e1000_check_reset_block(&adapter->hw))
-		e1000_phy_hw_reset(&adapter->hw);
-
 	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
@@ -5361,6 +5365,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
 
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
@@ -5414,12 +5419,10 @@ static int __init e1000_init_module(void)
 	int ret;
 	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
 	       e1000e_driver_name, e1000e_driver_version);
-	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
+	printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
 	       e1000e_driver_name);
 	ret = pci_register_driver(&e1000_driver);
-	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
-			       PM_QOS_DEFAULT_VALUE);
-				
+
 	return ret;
 }
 module_init(e1000_init_module);
@@ -5433,7 +5436,6 @@ module_init(e1000_init_module);
 static void __exit e1000_exit_module(void)
 {
 	pci_unregister_driver(&e1000_driver);
-	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
 }
 module_exit(e1000_exit_module);
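
Taken together, the netdev.c hunks above also move the CPU-DMA-latency constraint from module scope to per-device scope: e1000_init_module()/e1000_exit_module() no longer register it, e1000e_up()/e1000e_down() add and remove a requirement keyed by adapter->netdev->name, and e1000_configure_rx() updates it when early receive is used with jumbo frames. Condensed from the hunks above (fragments, same pm_qos calls the patch itself uses):

	/* e1000e_up(): register a default (no-op) per-device requirement */
	if (adapter->flags & FLAG_HAS_ERT)
		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
		                       adapter->netdev->name,
		                       PM_QOS_DEFAULT_VALUE);

	/* e1000_configure_rx(): tighten it to 55 only for jumbo MTUs */
	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
	                          adapter->netdev->name, 55);

	/* e1000e_down(): drop it again */
	if (adapter->flags & FLAG_HAS_ERT)
		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
		                          adapter->netdev->name);
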
 
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 1342e0b1815c..2e399778cae5 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 85f955f70417..55a2c0acfee7 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -44,6 +44,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] =
 	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_m88_cable_length_table)
 
 static const u16 e1000_igp_2_cable_length_table[] =
 	{ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
@@ -130,7 +132,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
 	u16 phy_id;
 	u16 retry_count = 0;
 
-	if (!(phy->ops.read_phy_reg))
+	if (!(phy->ops.read_reg))
 		goto out;
 
 	while (retry_count < 2) {
@@ -151,29 +153,29 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
 			goto out;
 
 		/*
-		 * If the PHY ID is still unknown, we may have an 82577i
-		 * without link.  We will try again after setting Slow
-		 * MDIC mode. No harm in trying again in this case since
-		 * the PHY ID is unknown at this point anyway
+		 * If the PHY ID is still unknown, we may have an 82577
+		 * without link.  We will try again after setting Slow MDIC
+		 * mode. No harm in trying again in this case since the PHY
+		 * ID is unknown at this point anyway.
 		 */
-		ret_val = phy->ops.acquire_phy(hw);
+		ret_val = phy->ops.acquire(hw);
 		if (ret_val)
 			goto out;
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
 		if (ret_val)
 			goto out;
-		phy->ops.release_phy(hw);
+		phy->ops.release(hw);
 
 		retry_count++;
 	}
 out:
 	/* Revert to MDIO fast mode, if applicable */
 	if (retry_count) {
-		ret_val = phy->ops.acquire_phy(hw);
+		ret_val = phy->ops.acquire(hw);
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
-		phy->ops.release_phy(hw);
+		phy->ops.release(hw);
 	}
 
 	return ret_val;
@@ -211,7 +213,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 	u32 i, mdic = 0;
 
 	if (offset > MAX_PHY_REG_ADDRESS) {
-		hw_dbg(hw, "PHY Address %d is out of range\n", offset);
+		e_dbg("PHY Address %d is out of range\n", offset);
 		return -E1000_ERR_PARAM;
 	}
 
@@ -238,11 +240,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 			break;
 	}
 	if (!(mdic & E1000_MDIC_READY)) {
-		hw_dbg(hw, "MDI Read did not complete\n");
+		e_dbg("MDI Read did not complete\n");
 		return -E1000_ERR_PHY;
 	}
 	if (mdic & E1000_MDIC_ERROR) {
-		hw_dbg(hw, "MDI Error\n");
+		e_dbg("MDI Error\n");
 		return -E1000_ERR_PHY;
 	}
 	*data = (u16) mdic;
@@ -264,7 +266,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 	u32 i, mdic = 0;
 
 	if (offset > MAX_PHY_REG_ADDRESS) {
-		hw_dbg(hw, "PHY Address %d is out of range\n", offset);
+		e_dbg("PHY Address %d is out of range\n", offset);
 		return -E1000_ERR_PARAM;
 	}
 
@@ -292,11 +294,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 			break;
 	}
 	if (!(mdic & E1000_MDIC_READY)) {
-		hw_dbg(hw, "MDI Write did not complete\n");
+		e_dbg("MDI Write did not complete\n");
 		return -E1000_ERR_PHY;
 	}
 	if (mdic & E1000_MDIC_ERROR) {
-		hw_dbg(hw, "MDI Error\n");
+		e_dbg("MDI Error\n");
 		return -E1000_ERR_PHY;
 	}
 
@@ -317,14 +319,14 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
 {
 	s32 ret_val;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					   data);
 
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 
 	return ret_val;
 }
@@ -342,14 +344,14 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
 {
 	s32 ret_val;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					    data);
 
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 
 	return ret_val;
 }
@@ -371,10 +373,10 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
 	s32 ret_val = 0;
 
 	if (!locked) {
-		if (!(hw->phy.ops.acquire_phy))
+		if (!(hw->phy.ops.acquire))
 			goto out;
 
-		ret_val = hw->phy.ops.acquire_phy(hw);
+		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			goto out;
 	}
@@ -392,7 +394,7 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
 
 release:
 	if (!locked)
-		hw->phy.ops.release_phy(hw);
+		hw->phy.ops.release(hw);
 out:
 	return ret_val;
 }
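
The phy.c accessors are reworked so a single __e1000e_* helper serves both the locked and unlocked cases: callers that already hold the PHY/NVM semaphore pass locked = true and the helper skips acquire()/release(), otherwise it brackets the MDIC access itself. The same pattern in plain, compilable C with a pthread mutex standing in for the hardware semaphore (illustrative only):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
	static int reg_value = 42;		/* stands in for an MDIC register */

	/* 'locked' means the caller already holds bus_lock */
	static int __read_reg(int *out, bool locked)
	{
		if (!locked)
			pthread_mutex_lock(&bus_lock);

		*out = reg_value;		/* the actual bus access */

		if (!locked)
			pthread_mutex_unlock(&bus_lock);
		return 0;
	}

	static int read_reg(int *out)        { return __read_reg(out, false); }
	static int read_reg_locked(int *out) { return __read_reg(out, true); }

	int main(void)
	{
		int v;

		read_reg(&v);			/* takes and drops the lock itself */

		pthread_mutex_lock(&bus_lock);	/* caller-managed critical section */
		read_reg_locked(&v);
		pthread_mutex_unlock(&bus_lock);

		printf("reg = %d\n", v);
		return 0;
	}
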
@@ -442,10 +444,10 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
 	s32 ret_val = 0;
 
 	if (!locked) {
-		if (!(hw->phy.ops.acquire_phy))
+		if (!(hw->phy.ops.acquire))
 			goto out;
 
-		ret_val = hw->phy.ops.acquire_phy(hw);
+		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			goto out;
 	}
@@ -463,7 +465,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
 
 release:
 	if (!locked)
-		hw->phy.ops.release_phy(hw);
+		hw->phy.ops.release(hw);
 
 out:
 	return ret_val;
@@ -515,10 +517,10 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
 	s32 ret_val = 0;
 
 	if (!locked) {
-		if (!(hw->phy.ops.acquire_phy))
+		if (!(hw->phy.ops.acquire))
 			goto out;
 
-		ret_val = hw->phy.ops.acquire_phy(hw);
+		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			goto out;
 	}
@@ -533,7 +535,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
 	*data = (u16)kmrnctrlsta;
 
 	if (!locked)
-		hw->phy.ops.release_phy(hw);
+		hw->phy.ops.release(hw);
 
 out:
 	return ret_val;
@@ -587,10 +589,10 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
 	s32 ret_val = 0;
 
 	if (!locked) {
-		if (!(hw->phy.ops.acquire_phy))
+		if (!(hw->phy.ops.acquire))
 			goto out;
 
-		ret_val = hw->phy.ops.acquire_phy(hw);
+		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			goto out;
 	}
@@ -602,7 +604,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
 	udelay(2);
 
 	if (!locked)
-		hw->phy.ops.release_phy(hw);
+		hw->phy.ops.release(hw);
 
 out:
 	return ret_val;
@@ -649,7 +651,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 	u16 phy_data;
 
 	/* Enable CRS on TX. This must be set for half-duplex operation. */
-	ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data);
+	ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -658,7 +660,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
+	ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
 
 out:
 	return ret_val;
@@ -776,12 +778,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 	/* Commit the changes. */
 	ret_val = e1000e_commit_phy(hw);
 	if (ret_val) {
-		hw_dbg(hw, "Error committing the PHY changes\n");
+		e_dbg("Error committing the PHY changes\n");
 		return ret_val;
 	}
 
 	if (phy->type == e1000_phy_82578) {
-		ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
 		                            &phy_data);
 		if (ret_val)
 			return ret_val;
@@ -789,7 +791,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 		/* 82578 PHY - set the downshift count to 1x. */
 		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
 		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
-		ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
 		                             phy_data);
 		if (ret_val)
 			return ret_val;
@@ -813,7 +815,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
 
 	ret_val = e1000_phy_hw_reset(hw);
 	if (ret_val) {
-		hw_dbg(hw, "Error resetting the PHY.\n");
+		e_dbg("Error resetting the PHY.\n");
 		return ret_val;
 	}
 
@@ -824,9 +826,9 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
 	msleep(100);
 
 	/* disable lplu d0 during driver init */
-	ret_val = e1000_set_d0_lplu_state(hw, 0);
+	ret_val = e1000_set_d0_lplu_state(hw, false);
 	if (ret_val) {
-		hw_dbg(hw, "Error Disabling LPLU D0\n");
+		e_dbg("Error Disabling LPLU D0\n");
 		return ret_val;
 	}
 	/* Configure mdi-mdix settings */
@@ -962,39 +964,39 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 				 NWAY_AR_10T_HD_CAPS);
 	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
 
-	hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised);
+	e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
 
 	/* Do we want to advertise 10 Mb Half Duplex? */
 	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
-		hw_dbg(hw, "Advertise 10mb Half duplex\n");
+		e_dbg("Advertise 10mb Half duplex\n");
 		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
 	}
 
 	/* Do we want to advertise 10 Mb Full Duplex? */
 	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
-		hw_dbg(hw, "Advertise 10mb Full duplex\n");
+		e_dbg("Advertise 10mb Full duplex\n");
 		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
 	}
 
 	/* Do we want to advertise 100 Mb Half Duplex? */
 	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
-		hw_dbg(hw, "Advertise 100mb Half duplex\n");
+		e_dbg("Advertise 100mb Half duplex\n");
 		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
 	}
 
 	/* Do we want to advertise 100 Mb Full Duplex? */
 	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
-		hw_dbg(hw, "Advertise 100mb Full duplex\n");
+		e_dbg("Advertise 100mb Full duplex\n");
 		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
 	}
 
 	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
 	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
-		hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n");
+		e_dbg("Advertise 1000mb Half duplex request denied!\n");
 
 	/* Do we want to advertise 1000 Mb Full Duplex? */
 	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
-		hw_dbg(hw, "Advertise 1000mb Full duplex\n");
+		e_dbg("Advertise 1000mb Full duplex\n");
 		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
 	}
 
@@ -1053,7 +1055,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
 		break;
 	default:
-		hw_dbg(hw, "Flow control param set incorrectly\n");
+		e_dbg("Flow control param set incorrectly\n");
 		ret_val = -E1000_ERR_CONFIG;
 		return ret_val;
 	}
@@ -1062,7 +1064,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
 	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
 		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
@@ -1099,13 +1101,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	if (phy->autoneg_advertised == 0)
 		phy->autoneg_advertised = phy->autoneg_mask;
 
-	hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n");
+	e_dbg("Reconfiguring auto-neg advertisement params\n");
 	ret_val = e1000_phy_setup_autoneg(hw);
 	if (ret_val) {
-		hw_dbg(hw, "Error Setting up Auto-Negotiation\n");
+		e_dbg("Error Setting up Auto-Negotiation\n");
 		return ret_val;
 	}
-	hw_dbg(hw, "Restarting Auto-Neg\n");
+	e_dbg("Restarting Auto-Neg\n");
 
 	/*
 	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
@@ -1127,7 +1129,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	if (phy->autoneg_wait_to_complete) {
 		ret_val = e1000_wait_autoneg(hw);
 		if (ret_val) {
-			hw_dbg(hw, "Error while waiting for "
+			e_dbg("Error while waiting for "
 				 "autoneg to complete\n");
 			return ret_val;
 		}
@@ -1165,10 +1167,10 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
 		 * PHY will be set to 10H, 10F, 100H or 100F
 		 * depending on user settings.
 		 */
-		hw_dbg(hw, "Forcing Speed and Duplex\n");
+		e_dbg("Forcing Speed and Duplex\n");
 		ret_val = e1000_phy_force_speed_duplex(hw);
 		if (ret_val) {
-			hw_dbg(hw, "Error Forcing Speed and Duplex\n");
+			e_dbg("Error Forcing Speed and Duplex\n");
 			return ret_val;
 		}
 	}
@@ -1185,11 +1187,11 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
 		return ret_val;
 
 	if (link) {
-		hw_dbg(hw, "Valid link established!!!\n");
+		e_dbg("Valid link established!!!\n");
 		e1000e_config_collision_dist(hw);
 		ret_val = e1000e_config_fc_after_link_up(hw);
 	} else {
-		hw_dbg(hw, "Unable to establish link!!!\n");
+		e_dbg("Unable to establish link!!!\n");
 	}
 
 	return ret_val;
@@ -1235,12 +1237,12 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	hw_dbg(hw, "IGP PSCR: %X\n", phy_data);
+	e_dbg("IGP PSCR: %X\n", phy_data);
 
 	udelay(1);
 
 	if (phy->autoneg_wait_to_complete) {
-		hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n");
+		e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw,
 						     PHY_FORCE_LIMIT,
@@ -1250,7 +1252,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 			return ret_val;
 
 		if (!link)
-			hw_dbg(hw, "Link taking longer than expected.\n");
+			e_dbg("Link taking longer than expected.\n");
 
 		/* Try once more */
 		ret_val = e1000e_phy_has_link_generic(hw,
@@ -1294,7 +1296,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data);
+	e_dbg("M88E1000 PSCR: %X\n", phy_data);
 
 	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
@@ -1312,7 +1314,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 		return ret_val;
 
 	if (phy->autoneg_wait_to_complete) {
-		hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");
+		e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
 						     100000, &link);
@@ -1320,17 +1322,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			return ret_val;
 
 		if (!link) {
-			/*
-			 * We didn't get link.
-			 * Reset the DSP and cross our fingers.
-			 */
-			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
-					   0x001d);
-			if (ret_val)
-				return ret_val;
-			ret_val = e1000e_phy_reset_dsp(hw);
-			if (ret_val)
-				return ret_val;
+			if (hw->phy.type != e1000_phy_m88) {
+				e_dbg("Link taking longer than expected.\n");
+			} else {
+				/*
+				 * We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = e1e_wphy(hw,
+						M88E1000_PHY_PAGE_SELECT,
+						0x001d);
+				if (ret_val)
+					return ret_val;
+				ret_val = e1000e_phy_reset_dsp(hw);
+				if (ret_val)
+					return ret_val;
+			}
 		}
 
 		/* Try once more */
@@ -1340,6 +1347,9 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			return ret_val;
 	}
 
+	if (hw->phy.type != e1000_phy_m88)
+		return 0;
+
 	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 	if (ret_val)
 		return ret_val;
@@ -1369,6 +1379,73 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 }
 
 /**
+ *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	e_dbg("IFE PMC: %X\n", data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
  *  e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
  *  @hw: pointer to the HW structure
  *  @phy_ctrl: pointer to current value of PHY_CONTROL
@@ -1403,11 +1480,11 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
 	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
 		ctrl &= ~E1000_CTRL_FD;
 		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
-		hw_dbg(hw, "Half Duplex\n");
+		e_dbg("Half Duplex\n");
 	} else {
 		ctrl |= E1000_CTRL_FD;
 		*phy_ctrl |= MII_CR_FULL_DUPLEX;
-		hw_dbg(hw, "Full Duplex\n");
+		e_dbg("Full Duplex\n");
 	}
 
 	/* Forcing 10mb or 100mb? */
@@ -1415,12 +1492,12 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
 		ctrl |= E1000_CTRL_SPD_100;
 		*phy_ctrl |= MII_CR_SPEED_100;
 		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
-		hw_dbg(hw, "Forcing 100mb\n");
+		e_dbg("Forcing 100mb\n");
 	} else {
 		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
 		*phy_ctrl |= MII_CR_SPEED_10;
 		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
-		hw_dbg(hw, "Forcing 10mb\n");
+		e_dbg("Forcing 10mb\n");
 	}
 
 	e1000e_config_collision_dist(hw);
@@ -1523,8 +1600,8 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
 	switch (phy->type) {
 	case e1000_phy_m88:
 	case e1000_phy_gg82563:
+	case e1000_phy_bm:
 	case e1000_phy_82578:
-	case e1000_phy_82577:
 		offset	= M88E1000_PHY_SPEC_STATUS;
 		mask	= M88E1000_PSSR_DOWNSHIFT;
 		break;
@@ -1535,7 +1612,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
 		break;
 	default:
 		/* speed downshift not supported */
-		phy->speed_downgraded = 0;
+		phy->speed_downgraded = false;
 		return 0;
 	}
 
@@ -1555,7 +1632,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
  *
  *  Polarity is determined based on the PHY specific status register.
  **/
-static s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1580,7 +1657,7 @@ static s32 e1000_check_polarity_m88(struct e1000_hw *hw)
  *  Polarity is determined based on the PHY port status register, and the
  *  current speed (since there is no polarity at 100Mbps).
  **/
-static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1618,6 +1695,39 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 }
 
 /**
+ *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on the polarity reversal feature being enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the reversal feature being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask = IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset = IFE_PHY_SPECIAL_CONTROL;
+		mask = IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+		                       ? e1000_rev_polarity_reversed
+		                       : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
  *  e1000_wait_autoneg - Wait for auto-neg completion
  *  @hw: pointer to the HW structure
  *
@@ -1717,15 +1827,21 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
 
 	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
 	if (ret_val)
-		return ret_val;
+		goto out;
 
 	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
 	phy->min_cable_length = e1000_m88_cable_length_table[index];
-	phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
 
 	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
 
+out:
 	return ret_val;
 }
 
@@ -1736,7 +1852,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
  *  The automatic gain control (agc) normalizes the amplitude of the
  *  received signal, adjusting for the attenuation produced by the
  *  cable.  By reading the AGC registers, which represent the
- *  combination of course and fine gain value, the value can be put
+ *  combination of coarse and fine gain value, the value can be put
  *  into a lookup table to obtain the approximate cable length
  *  for each channel.
  **/
@@ -1761,7 +1877,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
 
 		/*
 		 * Getting bits 15:9, which represent the combination of
-		 * course and fine gain values.  The result is a number
+		 * coarse and fine gain values.  The result is a number
 		 * that can be put into the lookup table to obtain the
 		 * approximate cable length.
 		 */
@@ -1815,8 +1931,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	if (hw->phy.media_type != e1000_media_type_copper) {
-		hw_dbg(hw, "Phy info is only valid for copper media\n");
+	if (phy->media_type != e1000_media_type_copper) {
+		e_dbg("Phy info is only valid for copper media\n");
 		return -E1000_ERR_CONFIG;
 	}
 
@@ -1825,7 +1941,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
 		return ret_val;
 
 	if (!link) {
-		hw_dbg(hw, "Phy info is only valid if link is up\n");
+		e_dbg("Phy info is only valid if link is up\n");
 		return -E1000_ERR_CONFIG;
 	}
 
@@ -1893,11 +2009,11 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
 		return ret_val;
 
 	if (!link) {
-		hw_dbg(hw, "Phy info is only valid if link is up\n");
+		e_dbg("Phy info is only valid if link is up\n");
 		return -E1000_ERR_CONFIG;
 	}
 
-	phy->polarity_correction = 1;
+	phy->polarity_correction = true;
 
 	ret_val = e1000_check_polarity_igp(hw);
 	if (ret_val)
@@ -1936,6 +2052,61 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
 }
 
 /**
+ *  e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+	                           ? false : true;
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
  *  e1000e_phy_sw_reset - PHY software reset
  *  @hw: pointer to the HW structure
  *
@@ -1980,7 +2151,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
 	if (ret_val)
 		return 0;
 
-	ret_val = phy->ops.acquire_phy(hw);
+	ret_val = phy->ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -1995,7 +2166,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
 
 	udelay(150);
 
-	phy->ops.release_phy(hw);
+	phy->ops.release(hw);
 
 	return e1000_get_phy_cfg_done(hw);
 }
@@ -2021,7 +2192,7 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw)
  **/
 s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
 {
-	hw_dbg(hw, "Running IGP 3 PHY init script\n");
+	e_dbg("Running IGP 3 PHY init script\n");
 
 	/* PHY init IGP 3 */
 	/* Enable rise/fall, 10-mode work in class-A */
@@ -2189,28 +2360,34 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
 s32 e1000e_determine_phy_address(struct e1000_hw *hw)
 {
 	s32 ret_val = -E1000_ERR_PHY_TYPE;
-	u32 phy_addr= 0;
-	u32 i = 0;
+	u32 phy_addr = 0;
+	u32 i;
 	enum e1000_phy_type phy_type = e1000_phy_unknown;
 
-	do {
-		for (phy_addr = 0; phy_addr < 4; phy_addr++) {
-			hw->phy.addr = phy_addr;
+	hw->phy.id = phy_type;
+
+	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+		hw->phy.addr = phy_addr;
+		i = 0;
+
+		do {
 			e1000e_get_phy_id(hw);
 			phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
 
-			/* 
+			/*
 			 * If phy_type is valid, break - we found our
 			 * PHY address
 			 */
 			if (phy_type  != e1000_phy_unknown) {
 				ret_val = 0;
-				break;
+				goto out;
 			}
-		}
-		i++;
-	} while ((ret_val != 0) && (i < 100));
+			msleep(1);
+			i++;
+		} while (i < 10);
+	}
 
+out:
 	return ret_val;
 }
 
@@ -2246,7 +2423,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 	u32 page = offset >> IGP_PAGE_SHIFT;
 	u32 page_shift = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -2284,7 +2461,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 	                                    data);
 
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 	return ret_val;
 }
 
@@ -2305,7 +2482,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 	u32 page = offset >> IGP_PAGE_SHIFT;
 	u32 page_shift = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -2342,7 +2519,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 	                                   data);
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 	return ret_val;
 }
 
@@ -2361,7 +2538,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
 	s32 ret_val;
 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -2387,7 +2564,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					   data);
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 	return ret_val;
 }
 
@@ -2405,7 +2582,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
 	s32 ret_val;
 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
+	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -2431,7 +2608,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
 					    data);
 
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 	return ret_val;
 }
 
@@ -2464,7 +2641,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
 	/* Gig must be disabled for MDIO accesses to page 800 */
 	if ((hw->mac.type == e1000_pchlan) &&
 	   (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
-		hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
+		e_dbg("Attempting to access page 800 while gig enabled.\n");
 
 	/* All operations in this function are phy address 1 */
 	hw->phy.addr = 1;
@@ -2474,20 +2651,26 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
 	                          (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
-	if (ret_val)
+	if (ret_val) {
+		e_dbg("Could not read PHY page 769\n");
 		goto out;
+	}
 
 	/* First clear bit 4 to avoid a power state change */
 	phy_reg &= ~(BM_WUC_HOST_WU_BIT);
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
-	if (ret_val)
+	if (ret_val) {
+		e_dbg("Could not clear PHY page 769 bit 4\n");
 		goto out;
+	}
 
 	/* Write bit 2 = 1, and clear bit 4 to 769_17 */
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG,
 	                                    phy_reg | BM_WUC_ENABLE_BIT);
-	if (ret_val)
+	if (ret_val) {
+		e_dbg("Could not write PHY page 769 bit 2\n");
 		goto out;
+	}
 
 	/* Select page 800 */
 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -2495,21 +2678,25 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
 
 	/* Write the page 800 offset value using opcode 0x11 */
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
-	if (ret_val)
+	if (ret_val) {
+		e_dbg("Could not write address opcode to page 800\n");
 		goto out;
+	}
 
 	if (read) {
 	        /* Read the page 800 value using opcode 0x12 */
 		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
 		                                   data);
 	} else {
-	        /* Read the page 800 value using opcode 0x12 */
+	        /* Write the page 800 value using opcode 0x12 */
 		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
 						    *data);
 	}
 
-	if (ret_val)
+	if (ret_val) {
+		e_dbg("Could not access data value from page 800\n");
 		goto out;
+	}
 
 	/*
 	 * Restore 769_17.2 to its original value
@@ -2520,12 +2707,53 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
 
 	/* Clear 769_17.2 */
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+	if (ret_val) {
+		e_dbg("Could not clear PHY page 769 bit 2\n");
+		goto out;
+	}
 
 out:
 	return ret_val;
 }
 
 /**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on lan is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, to turn off link during a driver
+ * unload, or when wake on lan is not enabled.  The PHY settings are
+ * preserved across the power down/up cycle.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+	msleep(1);
+}
+
+/**
  *  e1000e_commit_phy - Soft PHY reset
  *  @hw: pointer to the HW structure
  *
@@ -2534,8 +2762,8 @@ out:
  **/
 s32 e1000e_commit_phy(struct e1000_hw *hw)
 {
-	if (hw->phy.ops.commit_phy)
-		return hw->phy.ops.commit_phy(hw);
+	if (hw->phy.ops.commit)
+		return hw->phy.ops.commit(hw);
 
 	return 0;
 }
@@ -2614,7 +2842,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 	bool in_slow_mode = false;
 
 	if (!locked) {
-		ret_val = hw->phy.ops.acquire_phy(hw);
+		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return ret_val;
 	}
@@ -2670,7 +2898,7 @@ out:
 		ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
 
 	if (!locked)
-		hw->phy.ops.release_phy(hw);
+		hw->phy.ops.release(hw);
 
 	return ret_val;
 }
@@ -2723,7 +2951,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 	bool in_slow_mode = false;
 
 	if (!locked) {
-		ret_val = hw->phy.ops.acquire_phy(hw);
+		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return ret_val;
 	}
@@ -2796,7 +3024,7 @@ out:
 		ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
 
 	if (!locked)
-		hw->phy.ops.release_phy(hw);
+		hw->phy.ops.release(hw);
 
 	return ret_val;
 }
@@ -2872,7 +3100,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
 	/* masking with 0x3F to remove the page from offset */
 	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
 	if (ret_val) {
-		hw_dbg(hw, "Could not write PHY the HV address register\n");
+		e_dbg("Could not write PHY the HV address register\n");
 		goto out;
 	}
 
@@ -2883,7 +3111,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
 		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
 
 	if (ret_val) {
-		hw_dbg(hw, "Could not read data value from HV data register\n");
+		e_dbg("Could not read data value from HV data register\n");
 		goto out;
 	}
 
@@ -2911,12 +3139,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 		goto out;
 
 	/* Do not apply workaround if in PHY loopback bit 14 set */
-	hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data);
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
 	if (data & PHY_CONTROL_LB)
 		goto out;
 
 	/* check if link is up and at 1Gbps */
-	ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
+	ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -2932,13 +3160,13 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 	mdelay(200);
 
 	/* flush the packets in the fifo buffer */
-	ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
+	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
 	                                HV_MUX_DATA_CTRL_GEN_TO_MAC |
 	                                HV_MUX_DATA_CTRL_FORCE_SPEED);
 	if (ret_val)
 		goto out;
 
-	ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
+	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
 	                                HV_MUX_DATA_CTRL_GEN_TO_MAC);
 
 out:
@@ -2959,7 +3187,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 data;
 
-	ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
 
 	if (!ret_val)
 		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -2984,13 +3212,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
 		goto out;
 
 	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
@@ -2998,23 +3226,23 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
 	 * Clear Auto-Crossover to force MDI manually.  82577 requires MDI
 	 * forced whenever speed and duplex are forced.
 	 */
-	ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
 	if (ret_val)
 		goto out;
 
 	phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
 	phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
 
-	ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data);
+	ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
 	if (ret_val)
 		goto out;
 
-	hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data);
+	e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
 
 	udelay(1);
 
 	if (phy->autoneg_wait_to_complete) {
-		hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n");
+		e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw,
 		                                     PHY_FORCE_LIMIT,
@@ -3024,7 +3252,7 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
 			goto out;
 
 		if (!link)
-			hw_dbg(hw, "Link taking longer than expected.\n");
+			e_dbg("Link taking longer than expected.\n");
 
 		/* Try once more */
 		ret_val = e1000e_phy_has_link_generic(hw,
@@ -3060,7 +3288,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 		goto out;
 
 	if (!link) {
-		hw_dbg(hw, "Phy info is only valid if link is up\n");
+		e_dbg("Phy info is only valid if link is up\n");
 		ret_val = -E1000_ERR_CONFIG;
 		goto out;
 	}
@@ -3071,7 +3299,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
 	if (ret_val)
 		goto out;
 
@@ -3083,7 +3311,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 		if (ret_val)
 			goto out;
 
-		ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data);
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
 		if (ret_val)
 			goto out;
 
@@ -3117,7 +3345,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data, length;
 
-	ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -3125,7 +3353,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
 	         I82577_DSTATUS_CABLE_LENGTH_SHIFT;
 
 	if (length == E1000_CABLE_LENGTH_UNDEFINED)
-		ret_val = E1000_ERR_PHY;
+		ret_val = -E1000_ERR_PHY;
 
 	phy->cable_length = length;
 
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index d2f6ee1a6290..ca93c9a9d372 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -186,9 +186,9 @@ static int __init e21_probe1(struct net_device *dev, int ioaddr)
 		return -EBUSY;
 
 	/* First check the station address for the Ctron prefix. */
-	if (inb(ioaddr + E21_SAPROM + 0) != 0x00
-		|| inb(ioaddr + E21_SAPROM + 1) != 0x00
-		|| inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
+	if (inb(ioaddr + E21_SAPROM + 0) != 0x00 ||
+	    inb(ioaddr + E21_SAPROM + 1) != 0x00 ||
+	    inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
 		retval = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 1e934160062c..94c59498cdb6 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -990,7 +990,7 @@ static int eepro_open(struct net_device *dev)
 		return -EAGAIN;
 	}
 
-	if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) {
+	if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 592de8f1668a..6fbfc8eee632 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -457,7 +457,7 @@ static int eexp_open(struct net_device *dev)
 	if (!dev->irq || !irqrmap[dev->irq])
 		return -ENXIO;
 
-	ret = request_irq(dev->irq, &eexp_irq, 0, dev->name, dev);
+	ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 41bd7aeafd82..7b62336e6736 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -189,8 +189,8 @@ static void ehea_update_firmware_handles(void)
 		for (k = 0; k < EHEA_MAX_PORTS; k++) {
 			struct ehea_port *port = adapter->port[k];
 
-			if (!port || (port->state != EHEA_PORT_UP)
-				|| (num_ports == 0))
+			if (!port || (port->state != EHEA_PORT_UP) ||
+			    (num_ports == 0))
 				continue;
 
 			for (l = 0;
@@ -447,7 +447,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 	max_index_mask = q_skba->len - 1;
 	for (i = 0; i < fill_wqes; i++) {
 		u64 tmp_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, packet_size);
 		if (!skb) {
 			q_skba->os_skbs = fill_wqes - i;
 			if (q_skba->os_skbs == q_skba->len - 2) {
@@ -457,7 +459,6 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 			}
 			break;
 		}
-		skb_reserve(skb, NET_IP_ALIGN);
 
 		skb_arr[index] = skb;
 		tmp_addr = ehea_map_vaddr(skb->data);
@@ -500,7 +501,7 @@ static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
 {
 	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
 				  nr_of_wqes, EHEA_RWQE2_TYPE,
-				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+				  EHEA_RQ2_PKT_SIZE);
 }
 
 
@@ -508,7 +509,7 @@ static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
 {
 	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
 				  nr_of_wqes, EHEA_RWQE3_TYPE,
-				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+				  EHEA_MAX_PACKET_SIZE);
 }
 
 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
@@ -656,8 +657,8 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
 static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
 			  struct sk_buff *skb)
 {
-	int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
-		&& pr->port->vgrp;
+	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
+			      pr->port->vgrp);
 
 	if (use_lro) {
 		if (vlan_extracted)
@@ -1388,8 +1389,8 @@ out:
 
 int ehea_rem_smrs(struct ehea_port_res *pr)
 {
-	if ((ehea_rem_mr(&pr->send_mr))
-	    || (ehea_rem_mr(&pr->recv_mr)))
+	if ((ehea_rem_mr(&pr->send_mr)) ||
+	    (ehea_rem_mr(&pr->recv_mr)))
 		return -EIO;
 	else
 		return 0;
@@ -2030,8 +2031,8 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
 		write_ip_start_end(swqe, skb);
 
 		if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF)
-			    || (iph->frag_off & IP_OFFSET))
+			if ((iph->frag_off & IP_MF) ||
+			    (iph->frag_off & IP_OFFSET))
 				/* IP fragment, so don't change cs */
 				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
 			else
@@ -2076,8 +2077,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 			write_tcp_offset_end(swqe, skb);
 
 		} else if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF)
-			    || (iph->frag_off & IP_OFFSET))
+			if ((iph->frag_off & IP_MF) ||
+			    (iph->frag_off & IP_OFFSET))
 				/* IP fragment, so don't change cs */
 				swqe->tx_control |= EHEA_SWQE_CRC
 						 | EHEA_SWQE_IMM_DATA_PRESENT;
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index bc7c5b7abb88..18d405f78c0f 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -837,8 +837,8 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
 		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
 						0, pt_abs, EHEA_MAX_RPAGE);
 
-		if ((hret != H_SUCCESS)
-		    && (hret != H_PAGE_REGISTERED)) {
+		if ((hret != H_SUCCESS) &&
+		    (hret != H_PAGE_REGISTERED)) {
 			ehea_h_free_resource(adapter->handle, mr->handle,
 					     FORCE_FREE);
 			ehea_error("register_rpage_mr failed");
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d69d52ed7726..f875751af15e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -870,19 +870,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	dev_kfree_skb_any(buf->os_buf);
 }
 
-static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
-	unsigned int size)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
-
-	if (skb)
-		skb_reserve(skb, NET_IP_ALIGN);
-
-	return skb;
-}
-
 static int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +879,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	unsigned int os_buf_index = 0;
 	dma_addr_t dma_addr;
 
-	skb = enic_rq_alloc_skb(netdev, len);
+	skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!skb)
 		return -ENOMEM;
 
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 641a10d2e843..41494f7b2ec8 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -630,8 +630,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
 		barrier();
 		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
 			/* Work around read failure bug. */
-			if (phy_id == 1 && location < 6
-				&& inw(ioaddr + MIIData) == 0xffff) {
+			if (phy_id == 1 && location < 6 &&
+			    inw(ioaddr + MIIData) == 0xffff) {
 				outl(read_cmd, ioaddr + MIICtrl);
 				continue;
 			}
@@ -668,7 +668,7 @@ static int epic_open(struct net_device *dev)
 	outl(0x4001, ioaddr + GENCTL);
 
 	napi_enable(&ep->napi);
-	if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) {
+	if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
 		napi_disable(&ep->napi);
 		return retval;
 	}
@@ -1205,8 +1205,8 @@ static int epic_rx(struct net_device *dev, int budget)
 			}
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak
-				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			if (pkt_len < rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(ep->pci_dev,
 							    ep->rx_ring[entry].bufaddr,
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index f7d9ac8324cb..bd1db92aec1b 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -406,10 +406,10 @@ static int ethoc_rx(struct net_device *dev, int limit)
 
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
-			struct sk_buff *skb = netdev_alloc_skb(dev, size);
+			struct sk_buff *skb;
 
 			size -= 4; /* strip the CRC */
-			skb_reserve(skb, 2); /* align TCP/IP header */
+			skb = netdev_alloc_skb_ip_align(dev, size);
 
 			if (likely(skb)) {
 				void *src = phys_to_virt(bd.addr);
@@ -641,7 +641,7 @@ static int ethoc_mdio_probe(struct net_device *dev)
 		return -ENXIO;
 	}
 
-	phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0,
+	phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
 			PHY_INTERFACE_MODE_GMII);
 	if (IS_ERR(phy)) {
 		dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 18d5fbb9673e..dac4e595589e 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -839,7 +839,7 @@ static int netdev_open(struct net_device *dev)
 
 	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */
 
-	if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev))
+	if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
 		return -EAGAIN;
 
 	for (i = 0; i < 3; i++)
@@ -1629,8 +1629,8 @@ static int netdev_rx(struct net_device *dev)
 		if (debug)
 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);
 
-		if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
-		    || (rx_status & ErrorSummary)) {
+		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
+		    (rx_status & ErrorSummary)) {
 			if (rx_status & ErrorSummary) {	/* there was a fatal error */
 				if (debug)
 					printk(KERN_DEBUG
@@ -1655,8 +1655,8 @@ static int netdev_rx(struct net_device *dev)
 					cur = np->cur_rx;
 					while (desno <= np->really_rx_count) {
 						++desno;
-						if ((!(cur->status & RXOWN))
-						    && (cur->status & RXLSD))
+						if ((!(cur->status & RXOWN)) &&
+						    (cur->status & RXLSD))
 							break;
 						/* goto next rx descriptor */
 						cur = cur->next_desc_logical;
@@ -1786,8 +1786,8 @@ static void __set_rx_mode(struct net_device *dev)
 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = CR_W_AB | CR_W_AM;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 66dace6d324f..6407672b28e9 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -226,17 +226,17 @@ static int mpc52xx_fec_open(struct net_device *dev)
 		phy_start(priv->phydev);
 	}
 
-	if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED,
+	if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
 	                DRIVER_NAME "_ctrl", dev)) {
 		dev_err(&dev->dev, "ctrl interrupt request failed\n");
 		goto free_phy;
 	}
-	if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0,
+	if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
 	                DRIVER_NAME "_rx", dev)) {
 		dev_err(&dev->dev, "rx interrupt request failed\n");
 		goto free_ctrl_irq;
 	}
-	if (request_irq(priv->t_irq, &mpc52xx_fec_tx_interrupt, 0,
+	if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
 	                DRIVER_NAME "_tx", dev)) {
 		dev_err(&dev->dev, "tx interrupt request failed\n");
 		goto free_2irqs;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3116601dbfea..3c340489804a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -4004,7 +4004,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				/* Request irq for rx handling */
 				sprintf(np->name_rx, "%s-rx", dev->name);
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
-						&nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
 					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4013,7 +4013,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				/* Request irq for tx handling */
 				sprintf(np->name_tx, "%s-tx", dev->name);
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
-						&nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
 					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4022,7 +4022,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				/* Request irq for link and timer handling */
 				sprintf(np->name_other, "%s-other", dev->name);
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
-						&nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
 					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index efbf67689eca..25fabb3eedc5 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -3,8 +3,9 @@
  * Provides Bus interface for MIIM regs
  *
  * Author: Andy Fleming <afleming@freescale.com>
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
  *
  * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
  *
@@ -102,13 +103,18 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
 	return value;
 }
 
+static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
+{
+	return (void __iomem __force *)bus->priv;
+}
+
 /*
  * Write value to the PHY at mii_id at register regnum,
  * on the bus, waiting until the write is done before returning.
  */
 int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
 {
-	struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
+	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
 
 	/* Write to the local MII regs */
 	return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
@@ -120,7 +126,7 @@ int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
  */
 int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
+	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
 
 	/* Read the local MII regs */
 	return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
@@ -129,7 +135,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
-	struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
+	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
 	int timeout = PHY_INIT_TIMEOUT;
 
 	mutex_lock(&bus->mdio_lock);
@@ -189,19 +195,29 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
 
 
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
+static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
 	struct gfar __iomem *enet_regs;
+	u32 __iomem *ioremap_tbipa;
+	u64 addr, size;
 
 	/*
 	 * This is mildly evil, but so is our hardware for doing this.
 	 * Also, we have to cast back to struct gfar because of
 	 * definition weirdness done in gianfar.h.
 	 */
-	enet_regs = (struct gfar __iomem *)
-		((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs));
-
-	return &enet_regs->tbipa;
+	if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+		of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+		of_device_is_compatible(np, "gianfar")) {
+		enet_regs = (struct gfar __iomem *)regs;
+		return &enet_regs->tbipa;
+	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
+			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
+		addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
+		ioremap_tbipa = ioremap(addr, size);
+		return ioremap_tbipa;
+	} else
+		return NULL;
 }
 #endif
 
@@ -250,11 +266,12 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
 {
 	struct device_node *np = ofdev->node;
 	struct device_node *tbi;
-	struct fsl_pq_mdio __iomem *regs;
+	struct fsl_pq_mdio __iomem *regs = NULL;
+	void __iomem *map;
 	u32 __iomem *tbipa;
 	struct mii_bus *new_bus;
 	int tbiaddr = -1;
-	u64 addr, size;
+	u64 addr = 0, size = 0;
 	int err = 0;
 
 	new_bus = mdiobus_alloc();
@@ -269,13 +286,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
 
 	/* Set the PHY base address */
 	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
-	regs = ioremap(addr, size);
-
-	if (NULL == regs) {
+	map = ioremap(addr, size);
+	if (!map) {
 		err = -ENOMEM;
 		goto err_free_bus;
 	}
 
+	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+			of_device_is_compatible(np, "fsl,ucc-mdio") ||
+			of_device_is_compatible(np, "ucc_geth_phy"))
+		map -= offsetof(struct fsl_pq_mdio, miimcfg);
+	regs = map;
+
 	new_bus->priv = (void __force *)regs;
 
 	new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -290,9 +313,15 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
 
 	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
 			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
+			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
 			of_device_is_compatible(np, "gianfar")) {
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-		tbipa = get_gfar_tbipa(regs);
+		tbipa = get_gfar_tbipa(regs, np);
+		if (!tbipa) {
+			err = -EINVAL;
+			goto err_free_irqs;
+		}
 #else
 		err = -ENODEV;
 		goto err_free_irqs;
@@ -380,7 +409,7 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
 
 	dev_set_drvdata(device, NULL);
 
-	iounmap((void __iomem *)bus->priv);
+	iounmap(fsl_pq_mdio_get_regs(bus));
 	bus->priv = NULL;
 	mdiobus_free(bus);
 
@@ -405,6 +434,12 @@ static struct of_device_id fsl_pq_mdio_match[] = {
 	{
 		.compatible = "fsl,gianfar-mdio",
 	},
+	{
+		.compatible = "fsl,etsec2-tbi",
+	},
+	{
+		.compatible = "fsl,etsec2-mdio",
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 36dad527410b..1f7d865cedb6 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -3,8 +3,9 @@
  * Driver for the MDIO bus controller on Freescale PowerQUICC processors
  *
  * Author: Andy Fleming
+ * Modifier: Sandeep Gopalpet
  *
- * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -23,6 +24,12 @@
 #define MII_READ_COMMAND       0x00000001
 
 struct fsl_pq_mdio {
+	u8 res1[16];
+	u32 ieventm;	/* MDIO Interrupt event register (for etsec2)*/
+	u32 imaskm;	/* MDIO Interrupt mask register (for etsec2)*/
+	u8 res2[4];
+	u32 emapm;	/* MDIO Event mapping register (for etsec2)*/
+	u8 res3[1280];
 	u32 miimcfg;		/* MII management configuration reg */
 	u32 miimcom;		/* MII management command reg */
 	u32 miimadd;		/* MII management address reg */
@@ -31,9 +38,9 @@ struct fsl_pq_mdio {
 	u32 miimind;		/* MII management indication reg */
 	u8 reserved[28];	/* Space holder */
 	u32 utbipar;		/* TBI phy address reg (only on UCC) */
+	u8 res4[2728];
 } __attribute__ ((packed));
 
-
 int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
 int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c9..16def131c390 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 			      int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,11 +143,277 @@ void gfar_start(struct net_device *dev);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+			    dma_addr_t buf)
+{
+	u32 lstatus;
+
+	bdp->bufPtr = buf;
+
+	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+		lstatus |= BD_LFLAG(RXBD_WRAP);
+
+	eieio();
+
+	bdp->lstatus = lstatus;
+}
+
+static int gfar_init_bds(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct txbd8 *txbdp;
+	struct rxbd8 *rxbdp;
+	int i, j;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		/* Initialize some variables in our dev structure */
+		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+		tx_queue->dirty_tx = tx_queue->tx_bd_base;
+		tx_queue->cur_tx = tx_queue->tx_bd_base;
+		tx_queue->skb_curtx = 0;
+		tx_queue->skb_dirtytx = 0;
+
+		/* Initialize Transmit Descriptor Ring */
+		txbdp = tx_queue->tx_bd_base;
+		for (j = 0; j < tx_queue->tx_ring_size; j++) {
+			txbdp->lstatus = 0;
+			txbdp->bufPtr = 0;
+			txbdp++;
+		}
+
+		/* Set the last descriptor in the ring to indicate wrap */
+		txbdp--;
+		txbdp->status |= TXBD_WRAP;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->cur_rx = rx_queue->rx_bd_base;
+		rx_queue->skb_currx = 0;
+		rxbdp = rx_queue->rx_bd_base;
+
+		for (j = 0; j < rx_queue->rx_ring_size; j++) {
+			struct sk_buff *skb = rx_queue->rx_skbuff[j];
+
+			if (skb) {
+				gfar_init_rxbdp(rx_queue, rxbdp,
+						rxbdp->bufPtr);
+			} else {
+				skb = gfar_new_skb(ndev);
+				if (!skb) {
+					pr_err("%s: Can't allocate RX buffers\n",
+							ndev->name);
+					goto err_rxalloc_fail;
+				}
+				rx_queue->rx_skbuff[j] = skb;
+
+				gfar_new_rxbdp(rx_queue, rxbdp, skb);
+			}
+
+			rxbdp++;
+		}
+
+	}
+
+	return 0;
+
+err_rxalloc_fail:
+	free_skb_resources(priv);
+	return -ENOMEM;
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+	void *vaddr;
+	dma_addr_t addr;
+	int i, j, k;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct device *dev = &priv->ofdev->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	priv->total_tx_ring_size = 0;
+	for (i = 0; i < priv->num_tx_queues; i++)
+		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+	priv->total_rx_ring_size = 0;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+	/* Allocate memory for the buffer descriptors */
+	vaddr = dma_alloc_coherent(dev,
+			sizeof(struct txbd8) * priv->total_tx_ring_size +
+			sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			&addr, GFP_KERNEL);
+	if (!vaddr) {
+		if (netif_msg_ifup(priv))
+			pr_err("%s: Could not allocate buffer descriptors!\n",
+			       ndev->name);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+		tx_queue->tx_bd_dma_base = addr;
+		tx_queue->dev = ndev;
+		/* enet DMA only understands physical addresses */
+		addr    += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+		vaddr   += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+	}
+
+	/* Start the rx descriptor ring where the tx ring leaves off */
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+		rx_queue->rx_bd_dma_base = addr;
+		rx_queue->dev = ndev;
+		addr    += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
+		vaddr   += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
+	}
+
+	/* Setup the skbuff rings */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+				  tx_queue->tx_ring_size, GFP_KERNEL);
+		if (!tx_queue->tx_skbuff) {
+			if (netif_msg_ifup(priv))
+				pr_err("%s: Could not allocate tx_skbuff\n",
+						ndev->name);
+			goto cleanup;
+		}
+
+		for (k = 0; k < tx_queue->tx_ring_size; k++)
+			tx_queue->tx_skbuff[k] = NULL;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+				  rx_queue->rx_ring_size, GFP_KERNEL);
+
+		if (!rx_queue->rx_skbuff) {
+			if (netif_msg_ifup(priv))
+				pr_err("%s: Could not allocate rx_skbuff\n",
+				       ndev->name);
+			goto cleanup;
+		}
+
+		for (j = 0; j < rx_queue->rx_ring_size; j++)
+			rx_queue->rx_skbuff[j] = NULL;
+	}
+
+	if (gfar_init_bds(ndev))
+		goto cleanup;
+
+	return 0;
+
+cleanup:
+	free_skb_resources(priv);
+	return -ENOMEM;
+}
+
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 __iomem *baddr;
+	int i;
+
+	baddr = &regs->tbase0;
+	for(i = 0; i < priv->num_tx_queues; i++) {
+		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+		baddr	+= 2;
+	}
+
+	baddr = &regs->rbase0;
+	for(i = 0; i < priv->num_rx_queues; i++) {
+		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+		baddr   += 2;
+	}
+}
+
+static void gfar_init_mac(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 rctrl = 0;
+	u32 tctrl = 0;
+	u32 attrs = 0;
+
+	/* write the tx/rx base registers */
+	gfar_init_tx_rx_base(priv);
+
+	/* Configure the coalescing support */
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+	if (priv->rx_filer_enable)
+		rctrl |= RCTRL_FILREN;
+
+	if (priv->rx_csum_enable)
+		rctrl |= RCTRL_CHECKSUMMING;
+
+	if (priv->extended_hash) {
+		rctrl |= RCTRL_EXTHASH;
+
+		gfar_clear_exact_match(ndev);
+		rctrl |= RCTRL_EMEN;
+	}
+
+	if (priv->padding) {
+		rctrl &= ~RCTRL_PAL_MASK;
+		rctrl |= RCTRL_PADDING(priv->padding);
+	}
+
+	/* keep vlan related bits if it's enabled */
+	if (priv->vlgrp) {
+		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+		tctrl |= TCTRL_VLINS;
+	}
+
+	/* Init rctrl based on our settings */
+	gfar_write(&regs->rctrl, rctrl);
+
+	if (ndev->features & NETIF_F_IP_CSUM)
+		tctrl |= TCTRL_INIT_CSUM;
+
+	tctrl |= TCTRL_TXSCHED_PRIO;
+
+	gfar_write(&regs->tctrl, tctrl);
+
+	/* Set the extraction length and index */
+	attrs = ATTRELI_EL(priv->rx_stash_size) |
+		ATTRELI_EI(priv->rx_stash_index);
+
+	gfar_write(&regs->attreli, attrs);
+
+	/* Start with defaults, and add stashing or locking
+	 * depending on the appropriate variables */
+	attrs = ATTR_INIT_SETTINGS;
+
+	if (priv->bd_stash_en)
+		attrs |= ATTR_BDSTASH;
+
+	if (priv->rx_stash_size != 0)
+		attrs |= ATTR_BUFSTASH;
+
+	gfar_write(&regs->attr, attrs);
+
+	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
+	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
+	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+}
+
 static const struct net_device_ops gfar_netdev_ops = {
 	.ndo_open = gfar_enet_open,
 	.ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = {
 	.ndo_set_multicast_list = gfar_set_multi,
 	.ndo_tx_timeout = gfar_timeout,
 	.ndo_do_ioctl = gfar_ioctl,
+	.ndo_select_queue = gfar_select_queue,
 	.ndo_vlan_rx_register = gfar_vlan_rx_register,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -163,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = {
 #endif
 };
 
+unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
+void lock_rx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_rx_queues; i++)
+		spin_lock(&priv->rx_queue[i]->rxlock);
+}
+
+void lock_tx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_tx_queues; i++)
+		spin_lock(&priv->tx_queue[i]->txlock);
+}
+
+void unlock_rx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_rx_queues; i++)
+		spin_unlock(&priv->rx_queue[i]->rxlock);
+}
+
+void unlock_tx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_tx_queues; i++)
+		spin_unlock(&priv->tx_queue[i]->txlock);
+}
+
 /* Returns 1 if incoming frames use an FCB */
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
 	return priv->vlgrp || priv->rx_csum_enable;
 }
 
-static int gfar_of_init(struct net_device *dev)
+u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	return skb_get_queue_mapping(skb);
+}
+
+static void free_tx_pointers(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_tx_queues; i++)
+		kfree(priv->tx_queue[i]);
+}
+
+static void free_rx_pointers(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_rx_queues; i++)
+		kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < MAXGROUPS; i++)
+		if (priv->gfargrp[i].regs)
+			iounmap(priv->gfargrp[i].regs);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_grps; i++)
+		napi_disable(&priv->gfargrp[i].napi);
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_grps; i++)
+		napi_enable(&priv->gfargrp[i].napi);
+}
+
+static int gfar_parse_group(struct device_node *np,
+		struct gfar_private *priv, const char *model)
+{
+	u32 *queue_mask;
+	u64 addr, size;
+
+	addr = of_translate_address(np,
+			of_get_address(np, 0, &size, NULL));
+	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+
+	if (!priv->gfargrp[priv->num_grps].regs)
+		return -ENOMEM;
+
+	priv->gfargrp[priv->num_grps].interruptTransmit =
+			irq_of_parse_and_map(np, 0);
+
+	/* If we aren't the FEC we have multiple interrupts */
+	if (model && strcasecmp(model, "FEC")) {
+		priv->gfargrp[priv->num_grps].interruptReceive =
+			irq_of_parse_and_map(np, 1);
+		priv->gfargrp[priv->num_grps].interruptError =
+			irq_of_parse_and_map(np,2);
+		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
+			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
+			priv->gfargrp[priv->num_grps].interruptError < 0) {
+			return -EINVAL;
+		}
+	}
+
+	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
+	priv->gfargrp[priv->num_grps].priv = priv;
+	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+	if(priv->mode == MQ_MG_MODE) {
+		queue_mask = (u32 *)of_get_property(np,
+					"fsl,rx-bit-map", NULL);
+		priv->gfargrp[priv->num_grps].rx_bit_map =
+			queue_mask ?  *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
+		queue_mask = (u32 *)of_get_property(np,
+					"fsl,tx-bit-map", NULL);
+		priv->gfargrp[priv->num_grps].tx_bit_map =
+			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+	} else {
+		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
+		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+	}
+	priv->num_grps++;
+
+	return 0;
+}
+
+static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 {
 	const char *model;
 	const char *ctype;
 	const void *mac_addr;
-	u64 addr, size;
-	int err = 0;
-	struct gfar_private *priv = netdev_priv(dev);
-	struct device_node *np = priv->node;
+	int err = 0, i;
+	struct net_device *dev = NULL;
+	struct gfar_private *priv = NULL;
+	struct device_node *np = ofdev->node;
+	struct device_node *child = NULL;
 	const u32 *stash;
 	const u32 *stash_len;
 	const u32 *stash_idx;
+	unsigned int num_tx_qs, num_rx_qs;
+	u32 *tx_queues, *rx_queues;
 
 	if (!np || !of_device_is_available(np))
 		return -ENODEV;
 
-	/* get a pointer to the register memory */
-	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
-	priv->regs = ioremap(addr, size);
+	/* parse the num of tx and rx queues */
+	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+	num_tx_qs = tx_queues ? *tx_queues : 1;
+
+	if (num_tx_qs > MAX_TX_QS) {
+		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+				num_tx_qs, MAX_TX_QS);
+		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+		return -EINVAL;
+	}
+
+	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+	num_rx_qs = rx_queues ? *rx_queues : 1;
 
-	if (priv->regs == NULL)
+	if (num_rx_qs > MAX_RX_QS) {
+		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+				num_rx_qs, MAX_RX_QS);
+		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+		return -EINVAL;
+	}
+
+	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+	dev = *pdev;
+	if (NULL == dev)
 		return -ENOMEM;
 
-	priv->interruptTransmit = irq_of_parse_and_map(np, 0);
+	priv = netdev_priv(dev);
+	priv->node = ofdev->node;
+	priv->ndev = dev;
+
+	dev->num_tx_queues = num_tx_qs;
+	dev->real_num_tx_queues = num_tx_qs;
+	priv->num_tx_queues = num_tx_qs;
+	priv->num_rx_queues = num_rx_qs;
+	priv->num_grps = 0x0;
 
 	model = of_get_property(np, "model", NULL);
 
-	/* If we aren't the FEC we have multiple interrupts */
-	if (model && strcasecmp(model, "FEC")) {
-		priv->interruptReceive = irq_of_parse_and_map(np, 1);
+	for (i = 0; i < MAXGROUPS; i++)
+		priv->gfargrp[i].regs = NULL;
+
+	/* Parse and initialize group specific information */
+	if (of_device_is_compatible(np, "fsl,etsec2")) {
+		priv->mode = MQ_MG_MODE;
+		for_each_child_of_node(np, child) {
+			err = gfar_parse_group(child, priv, model);
+			if (err)
+				goto err_grp_init;
+		}
+	} else {
+		priv->mode = SQ_SG_MODE;
+		err = gfar_parse_group(np, priv, model);
+		if (err)
+			goto err_grp_init;
+	}
 
-		priv->interruptError = irq_of_parse_and_map(np, 2);
+	for (i = 0; i < priv->num_tx_queues; i++)
+		priv->tx_queue[i] = NULL;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->rx_queue[i] = NULL;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i] = kmalloc(sizeof(struct gfar_priv_tx_q),
+				GFP_KERNEL);
+		if (!priv->tx_queue[i]) {
+			err = -ENOMEM;
+			goto tx_alloc_failed;
+		}
+		priv->tx_queue[i]->tx_skbuff = NULL;
+		priv->tx_queue[i]->qindex = i;
+		priv->tx_queue[i]->dev = dev;
+		spin_lock_init(&(priv->tx_queue[i]->txlock));
+	}
 
-		if (priv->interruptTransmit < 0 ||
-				priv->interruptReceive < 0 ||
-				priv->interruptError < 0) {
-			err = -EINVAL;
-			goto err_out;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i] = kmalloc(sizeof(struct gfar_priv_rx_q),
+					GFP_KERNEL);
+		if (!priv->rx_queue[i]) {
+			err = -ENOMEM;
+			goto rx_alloc_failed;
 		}
+		priv->rx_queue[i]->rx_skbuff = NULL;
+		priv->rx_queue[i]->qindex = i;
+		priv->rx_queue[i]->dev = dev;
+		spin_lock_init(&(priv->rx_queue[i]->rxlock));
 	}
 
+
 	stash = of_get_property(np, "bd-stash", NULL);
 
-	if(stash) {
+	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
@@ -270,8 +734,13 @@ static int gfar_of_init(struct net_device *dev)
 
 	return 0;
 
-err_out:
-	iounmap(priv->regs);
+rx_alloc_failed:
+	free_rx_pointers(priv);
+tx_alloc_failed:
+	free_tx_pointers(priv);
+err_grp_init:
+	unmap_group_regs(priv);
+	free_netdev(dev);
 	return err;
 }
 
@@ -289,6 +758,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
 }
 
+static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+{
+	unsigned int new_bit_map = 0x0;
+	int mask = 0x1 << (max_qs - 1), i;
+	for (i = 0; i < max_qs; i++) {
+		if (bit_map & mask)
+			new_bit_map = new_bit_map + (1 << i);
+		mask = mask >> 0x1;
+	}
+	return new_bit_map;
+}
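reverse_bitmap() mirrors the low max_qs bits of a queue bit map: the device-tree rx/tx-bit-map values carry queue 0 in the MSB, while for_each_bit() scans from the LSB, so the map has to be flipped before it can drive queue indices (see the comment further down in gfar_probe()). A minimal userspace sketch of the same transformation, for illustration only (the helper above is the authoritative version):

	#include <stdio.h>

	/* Same algorithm as reverse_bitmap() above: mirror the low max_qs bits. */
	static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
	{
		unsigned int new_bit_map = 0, mask = 1u << (max_qs - 1), i;

		for (i = 0; i < max_qs; i++) {
			if (bit_map & mask)
				new_bit_map |= 1u << i;
			mask >>= 1;
		}
		return new_bit_map;
	}

	int main(void)
	{
		/* Queue 0 in the MSB of an 8-queue map ends up in bit 0. */
		printf("0x%02x -> 0x%02x\n", 0x80, reverse_bitmap(0x80, 8)); /* 0x01 */
		printf("0x%02x -> 0x%02x\n", 0xc1, reverse_bitmap(0xc1, 8)); /* 0x83 */
		return 0;
	}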
+
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+				   u32 class)
+{
+	u32 rqfpr = FPR_FILER_MASK;
+	u32 rqfcr = 0x0;
+
+	rqfar--;
+	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+	ftp_rqfpr[rqfar] = rqfpr;
+	ftp_rqfcr[rqfar] = rqfcr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar--;
+	rqfcr = RQFCR_CMP_NOMATCH;
+	ftp_rqfpr[rqfar] = rqfpr;
+	ftp_rqfcr[rqfar] = rqfcr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar--;
+	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+	rqfpr = class;
+	ftp_rqfcr[rqfar] = rqfcr;
+	ftp_rqfpr[rqfar] = rqfpr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar--;
+	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+	rqfpr = class;
+	ftp_rqfcr[rqfar] = rqfcr;
+	ftp_rqfpr[rqfar] = rqfpr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+	int i = 0x0;
+	u32 rqfar = MAX_FILER_IDX;
+	u32 rqfcr = 0x0;
+	u32 rqfpr = FPR_FILER_MASK;
+
+	/* Default rule */
+	rqfcr = RQFCR_CMP_MATCH;
+	ftp_rqfcr[rqfar] = rqfcr;
+	ftp_rqfpr[rqfar] = rqfpr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+	/* cur_filer_idx indicates the first non-masked rule */
+	priv->cur_filer_idx = rqfar;
+
+	/* Rest are masked rules */
+	rqfcr = RQFCR_CMP_NOMATCH;
+	for (i = 0; i < rqfar; i++) {
+		ftp_rqfcr[i] = rqfcr;
+		ftp_rqfpr[i] = rqfpr;
+		gfar_write_filer(priv, i, rqfcr, rqfpr);
+	}
+}
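gfar_init_filer_table() fills the filer from the top down: the default match rule sits at MAX_FILER_IDX, each cluster_entry_per_class() call consumes four entries below it, and everything beneath cur_filer_idx becomes a no-match rule. A quick index check under the assumption that MAX_FILER_IDX is 0xFF (the constant lives in gianfar.h; the value here is illustrative):

	#include <stdio.h>

	#define MAX_FILER_IDX	0xFF	/* assumed value of the gianfar.h constant */

	int main(void)
	{
		unsigned int rqfar = MAX_FILER_IDX;	/* the default rule lives here */
		unsigned int classes = 6;	/* IPv6, IPv6|UDP, IPv6|TCP, IPv4, ... */

		/* cluster_entry_per_class() decrements rqfar four times per class */
		rqfar -= 4 * classes;

		printf("cur_filer_idx = %u; entries 0..%u are no-match rules\n",
		       rqfar, rqfar - 1);
		return 0;
	}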
+
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
 static int gfar_probe(struct of_device *ofdev,
@@ -297,14 +845,17 @@ static int gfar_probe(struct of_device *ofdev,
 	u32 tempval;
 	struct net_device *dev = NULL;
 	struct gfar_private *priv = NULL;
-	int err = 0;
+	struct gfar __iomem *regs = NULL;
+	int err = 0, i, grp_idx = 0;
 	int len_devname;
+	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+	u32 isrg = 0;
+	u32 __iomem *baddr;
 
-	/* Create an ethernet device instance */
-	dev = alloc_etherdev(sizeof (*priv));
+	err = gfar_of_init(ofdev, &dev);
 
-	if (NULL == dev)
-		return -ENOMEM;
+	if (err)
+		return err;
 
 	priv = netdev_priv(dev);
 	priv->ndev = dev;
@@ -312,50 +863,46 @@ static int gfar_probe(struct of_device *ofdev,
 	priv->node = ofdev->node;
 	SET_NETDEV_DEV(dev, &ofdev->dev);
 
-	err = gfar_of_init(dev);
-
-	if (err)
-		goto regs_fail;
-
-	spin_lock_init(&priv->txlock);
-	spin_lock_init(&priv->rxlock);
 	spin_lock_init(&priv->bflock);
 	INIT_WORK(&priv->reset_task, gfar_reset_task);
 
 	dev_set_drvdata(&ofdev->dev, priv);
+	regs = priv->gfargrp[0].regs;
 
 	/* Stop the DMA engine now, in case it was running before */
 	/* (The firmware could have used it, and left it running). */
 	gfar_halt(dev);
 
 	/* Reset MAC layer */
-	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
 
 	/* We need to delay at least 3 TX clocks */
 	udelay(2);
 
 	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-	gfar_write(&priv->regs->maccfg1, tempval);
+	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize MACCFG2. */
-	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
 
 	/* Initialize ECNTRL */
-	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
 
 	/* Set the dev->base_addr to the gfar reg region */
-	dev->base_addr = (unsigned long) (priv->regs);
+	dev->base_addr = (unsigned long) regs;
 
 	SET_NETDEV_DEV(dev, &ofdev->dev);
 
 	/* Fill in the dev structure */
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
 	dev->mtu = 1500;
-
 	dev->netdev_ops = &gfar_netdev_ops;
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
+	/* Register one NAPI context per interrupt group */
+	for (i = 0; i < priv->num_grps; i++)
+		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+				GFAR_DEV_WEIGHT);
+
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -371,35 +918,35 @@ static int gfar_probe(struct of_device *ofdev,
 		priv->extended_hash = 1;
 		priv->hash_width = 9;
 
-		priv->hash_regs[0] = &priv->regs->igaddr0;
-		priv->hash_regs[1] = &priv->regs->igaddr1;
-		priv->hash_regs[2] = &priv->regs->igaddr2;
-		priv->hash_regs[3] = &priv->regs->igaddr3;
-		priv->hash_regs[4] = &priv->regs->igaddr4;
-		priv->hash_regs[5] = &priv->regs->igaddr5;
-		priv->hash_regs[6] = &priv->regs->igaddr6;
-		priv->hash_regs[7] = &priv->regs->igaddr7;
-		priv->hash_regs[8] = &priv->regs->gaddr0;
-		priv->hash_regs[9] = &priv->regs->gaddr1;
-		priv->hash_regs[10] = &priv->regs->gaddr2;
-		priv->hash_regs[11] = &priv->regs->gaddr3;
-		priv->hash_regs[12] = &priv->regs->gaddr4;
-		priv->hash_regs[13] = &priv->regs->gaddr5;
-		priv->hash_regs[14] = &priv->regs->gaddr6;
-		priv->hash_regs[15] = &priv->regs->gaddr7;
+		priv->hash_regs[0] = &regs->igaddr0;
+		priv->hash_regs[1] = &regs->igaddr1;
+		priv->hash_regs[2] = &regs->igaddr2;
+		priv->hash_regs[3] = &regs->igaddr3;
+		priv->hash_regs[4] = &regs->igaddr4;
+		priv->hash_regs[5] = &regs->igaddr5;
+		priv->hash_regs[6] = &regs->igaddr6;
+		priv->hash_regs[7] = &regs->igaddr7;
+		priv->hash_regs[8] = &regs->gaddr0;
+		priv->hash_regs[9] = &regs->gaddr1;
+		priv->hash_regs[10] = &regs->gaddr2;
+		priv->hash_regs[11] = &regs->gaddr3;
+		priv->hash_regs[12] = &regs->gaddr4;
+		priv->hash_regs[13] = &regs->gaddr5;
+		priv->hash_regs[14] = &regs->gaddr6;
+		priv->hash_regs[15] = &regs->gaddr7;
 
 	} else {
 		priv->extended_hash = 0;
 		priv->hash_width = 8;
 
-		priv->hash_regs[0] = &priv->regs->gaddr0;
-		priv->hash_regs[1] = &priv->regs->gaddr1;
-		priv->hash_regs[2] = &priv->regs->gaddr2;
-		priv->hash_regs[3] = &priv->regs->gaddr3;
-		priv->hash_regs[4] = &priv->regs->gaddr4;
-		priv->hash_regs[5] = &priv->regs->gaddr5;
-		priv->hash_regs[6] = &priv->regs->gaddr6;
-		priv->hash_regs[7] = &priv->regs->gaddr7;
+		priv->hash_regs[0] = &regs->gaddr0;
+		priv->hash_regs[1] = &regs->gaddr1;
+		priv->hash_regs[2] = &regs->gaddr2;
+		priv->hash_regs[3] = &regs->gaddr3;
+		priv->hash_regs[4] = &regs->gaddr4;
+		priv->hash_regs[5] = &regs->gaddr5;
+		priv->hash_regs[6] = &regs->gaddr6;
+		priv->hash_regs[7] = &regs->gaddr7;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -410,15 +957,70 @@ static int gfar_probe(struct of_device *ofdev,
 	if (dev->features & NETIF_F_IP_CSUM)
 		dev->hard_header_len += GMAC_FCB_LEN;
 
+	/* Program the isrg regs only if number of grps > 1 */
+	if (priv->num_grps > 1) {
+		baddr = &regs->isrg0;
+		for (i = 0; i < priv->num_grps; i++) {
+			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+			gfar_write(baddr, isrg);
+			baddr++;
+			isrg = 0x0;
+		}
+	}
+
+	/* Reverse the bit maps: the hardware maps have queue 0 in the MSB,
+	 * but for_each_bit scans from the LSB, so without the reversal the
+	 * queue numbers would come out mirrored */
+	for (i = 0; i < priv->num_grps; i++) {
+		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
+				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
+				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+	}
+
+	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+	 * also assign queues to groups */
+	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+				priv->num_rx_queues) {
+			priv->gfargrp[grp_idx].num_rx_queues++;
+			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+		}
+		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+		for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+				priv->num_tx_queues) {
+			priv->gfargrp[grp_idx].num_tx_queues++;
+			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+			tqueue = tqueue | (TQUEUE_EN0 >> i);
+		}
+		priv->gfargrp[grp_idx].rstat = rstat;
+		priv->gfargrp[grp_idx].tstat = tstat;
+		rstat = tstat = 0;
+	}
+
+	gfar_write(&regs->rqueue, rqueue);
+	gfar_write(&regs->tqueue, tqueue);
+
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
 
-	priv->txcoalescing = DEFAULT_TX_COALESCE;
-	priv->txic = DEFAULT_TXIC;
-	priv->rxcoalescing = DEFAULT_RX_COALESCE;
-	priv->rxic = DEFAULT_RXIC;
+	/* Initializing some of the rx/tx queue level parameters */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+		priv->tx_queue[i]->txic = DEFAULT_TXIC;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+	}
 
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -439,20 +1041,43 @@ static int gfar_probe(struct of_device *ofdev,
 
 	/* fill out IRQ number and name fields */
 	len_devname = strlen(dev->name);
-	strncpy(&priv->int_name_tx[0], dev->name, len_devname);
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		strncpy(&priv->int_name_tx[len_devname],
-			"_tx", sizeof("_tx") + 1);
-
-		strncpy(&priv->int_name_rx[0], dev->name, len_devname);
-		strncpy(&priv->int_name_rx[len_devname],
-			"_rx", sizeof("_rx") + 1);
+	for (i = 0; i < priv->num_grps; i++) {
+		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
+				len_devname);
+		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
+				"_g", sizeof("_g"));
+			priv->gfargrp[i].int_name_tx[
+				strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
+			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
+				priv->gfargrp[i].int_name_tx)],
+				"_tx", sizeof("_tx") + 1);
+
+			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
+					len_devname);
+			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
+					"_g", sizeof("_g"));
+			priv->gfargrp[i].int_name_rx[
+				strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
+			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
+				priv->gfargrp[i].int_name_rx)],
+				"_rx", sizeof("_rx") + 1);
+
+			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
+					len_devname);
+			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
+				"_g", sizeof("_g"));
+			priv->gfargrp[i].int_name_er[strlen(
+					priv->gfargrp[i].int_name_er)] = '0' + i;
+			strncpy(&priv->gfargrp[i].int_name_er[strlen(
+				priv->gfargrp[i].int_name_er)],
+				"_er", sizeof("_er") + 1);
+		} else
+			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+	}
 
-		strncpy(&priv->int_name_er[0], dev->name, len_devname);
-		strncpy(&priv->int_name_er[len_devname],
-			"_er", sizeof("_er") + 1);
-	} else
-		priv->int_name_tx[len_devname] = '\0';
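The strncpy/strlen chain above assembles per-group IRQ names of the form "<ifname>_g<N>_tx" (and _rx/_er), appending the group index as a single ASCII digit. A hedged snprintf equivalent, shown only to make the intended result explicit (the buffer size and interface name are assumptions standing in for the driver's int_name_* arrays):

	#include <stdio.h>

	int main(void)
	{
		char int_name_tx[32];		/* size is an assumption for the sketch */
		const char *ifname = "eth0";	/* hypothetical interface name */
		int grp = 1;

		/* Equivalent of the strncpy chain: "eth0" + "_g" + '1' + "_tx" */
		snprintf(int_name_tx, sizeof(int_name_tx), "%s_g%d_tx", ifname, grp);
		printf("%s\n", int_name_tx);	/* prints eth0_g1_tx */
		return 0;
	}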
+	/* Initialize the filer table */
+	gfar_init_filer_table(priv);
 
 	/* Create all the sysfs files */
 	gfar_init_sysfs(dev);
@@ -463,14 +1088,19 @@ static int gfar_probe(struct of_device *ofdev,
 	/* Even more device info helps when determining which kernel */
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
-	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+	for (i = 0; i < priv->num_rx_queues; i++)
+		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
+			dev->name, i, priv->rx_queue[i]->rx_ring_size);
+	for (i = 0; i < priv->num_tx_queues; i++)
+		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
+			dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
 	return 0;
 
 register_fail:
-	iounmap(priv->regs);
-regs_fail:
+	unmap_group_regs(priv);
+	free_tx_pointers(priv);
+	free_rx_pointers(priv);
 	if (priv->phy_node)
 		of_node_put(priv->phy_node);
 	if (priv->tbi_node)
@@ -491,54 +1121,59 @@ static int gfar_remove(struct of_device *ofdev)
 	dev_set_drvdata(&ofdev->dev, NULL);
 
 	unregister_netdev(priv->ndev);
-	iounmap(priv->regs);
+	unmap_group_regs(priv);
 	free_netdev(priv->ndev);
 
 	return 0;
 }
 
 #ifdef CONFIG_PM
-static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
+
+static int gfar_suspend(struct device *dev)
 {
-	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
-	struct net_device *dev = priv->ndev;
+	struct gfar_private *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	u32 tempval;
 
 	int magic_packet = priv->wol_en &&
 		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
-	netif_device_detach(dev);
+	netif_device_detach(ndev);
 
-	if (netif_running(dev)) {
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+	if (netif_running(ndev)) {
 
-		gfar_halt_nodisable(dev);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
+
+		gfar_halt_nodisable(ndev);
 
 		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
-		tempval = gfar_read(&priv->regs->maccfg1);
+		tempval = gfar_read(&regs->maccfg1);
 
 		tempval &= ~MACCFG1_TX_EN;
 
 		if (!magic_packet)
 			tempval &= ~MACCFG1_RX_EN;
 
-		gfar_write(&priv->regs->maccfg1, tempval);
+		gfar_write(&regs->maccfg1, tempval);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_rx_qs(priv);
+		unlock_tx_qs(priv);
+		local_irq_restore(flags);
 
-		napi_disable(&priv->napi);
+		disable_napi(priv);
 
 		if (magic_packet) {
 			/* Enable interrupt on Magic Packet */
-			gfar_write(&priv->regs->imask, IMASK_MAG);
+			gfar_write(&regs->imask, IMASK_MAG);
 
 			/* Enable Magic Packet mode */
-			tempval = gfar_read(&priv->regs->maccfg2);
+			tempval = gfar_read(&regs->maccfg2);
 			tempval |= MACCFG2_MPEN;
-			gfar_write(&priv->regs->maccfg2, tempval);
+			gfar_write(&regs->maccfg2, tempval);
 		} else {
 			phy_stop(priv->phydev);
 		}
@@ -547,17 +1182,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
 	return 0;
 }
 
-static int gfar_resume(struct of_device *ofdev)
+static int gfar_resume(struct device *dev)
 {
-	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
-	struct net_device *dev = priv->ndev;
+	struct gfar_private *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
 		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
-	if (!netif_running(dev)) {
-		netif_device_attach(dev);
+	if (!netif_running(ndev)) {
+		netif_device_attach(ndev);
 		return 0;
 	}
 
@@ -567,28 +1203,80 @@ static int gfar_resume(struct of_device *ofdev)
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+	lock_rx_qs(priv);
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
-
-	tempval = gfar_read(&priv->regs->maccfg2);
+	tempval = gfar_read(&regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
-	gfar_write(&priv->regs->maccfg2, tempval);
+	gfar_write(&regs->maccfg2, tempval);
 
-	gfar_start(dev);
+	gfar_start(ndev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_rx_qs(priv);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
-	netif_device_attach(dev);
+	netif_device_attach(ndev);
 
-	napi_enable(&priv->napi);
+	enable_napi(priv);
 
 	return 0;
 }
+
+static int gfar_restore(struct device *dev)
+{
+	struct gfar_private *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	gfar_init_bds(ndev);
+	init_registers(ndev);
+	gfar_set_mac_address(ndev);
+	gfar_init_mac(ndev);
+	gfar_start(ndev);
+
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	netif_device_attach(ndev);
+	enable_napi(priv);
+
+	return 0;
+}
+
+static struct dev_pm_ops gfar_pm_ops = {
+	.suspend = gfar_suspend,
+	.resume = gfar_resume,
+	.freeze = gfar_suspend,
+	.thaw = gfar_resume,
+	.restore = gfar_restore,
+};
+
+#define GFAR_PM_OPS (&gfar_pm_ops)
+
+static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
+{
+	return gfar_suspend(&ofdev->dev);
+}
+
+static int gfar_legacy_resume(struct of_device *ofdev)
+{
+	return gfar_resume(&ofdev->dev);
+}
+
 #else
-#define gfar_suspend NULL
-#define gfar_resume NULL
+
+#define GFAR_PM_OPS NULL
+#define gfar_legacy_suspend NULL
+#define gfar_legacy_resume NULL
+
 #endif
 
 /* Reads the controller's registers to determine what interface
@@ -597,7 +1285,10 @@ static int gfar_resume(struct of_device *ofdev)
 static phy_interface_t gfar_get_interface(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 ecntrl = gfar_read(&priv->regs->ecntrl);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 ecntrl;
+
+	ecntrl = gfar_read(&regs->ecntrl);
 
 	if (ecntrl & ECNTRL_SGMII_MODE)
 		return PHY_INTERFACE_MODE_SGMII;
@@ -719,46 +1410,52 @@ static void gfar_configure_serdes(struct net_device *dev)
 static void init_registers(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = NULL;
+	int i = 0;
 
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear IEVENT */
+		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
-	/* Initialize IMASK */
-	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+		/* Initialize IMASK */
+		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+	}
 
+	regs = priv->gfargrp[0].regs;
 	/* Init hash registers to zero */
-	gfar_write(&priv->regs->igaddr0, 0);
-	gfar_write(&priv->regs->igaddr1, 0);
-	gfar_write(&priv->regs->igaddr2, 0);
-	gfar_write(&priv->regs->igaddr3, 0);
-	gfar_write(&priv->regs->igaddr4, 0);
-	gfar_write(&priv->regs->igaddr5, 0);
-	gfar_write(&priv->regs->igaddr6, 0);
-	gfar_write(&priv->regs->igaddr7, 0);
-
-	gfar_write(&priv->regs->gaddr0, 0);
-	gfar_write(&priv->regs->gaddr1, 0);
-	gfar_write(&priv->regs->gaddr2, 0);
-	gfar_write(&priv->regs->gaddr3, 0);
-	gfar_write(&priv->regs->gaddr4, 0);
-	gfar_write(&priv->regs->gaddr5, 0);
-	gfar_write(&priv->regs->gaddr6, 0);
-	gfar_write(&priv->regs->gaddr7, 0);
+	gfar_write(&regs->igaddr0, 0);
+	gfar_write(&regs->igaddr1, 0);
+	gfar_write(&regs->igaddr2, 0);
+	gfar_write(&regs->igaddr3, 0);
+	gfar_write(&regs->igaddr4, 0);
+	gfar_write(&regs->igaddr5, 0);
+	gfar_write(&regs->igaddr6, 0);
+	gfar_write(&regs->igaddr7, 0);
+
+	gfar_write(&regs->gaddr0, 0);
+	gfar_write(&regs->gaddr1, 0);
+	gfar_write(&regs->gaddr2, 0);
+	gfar_write(&regs->gaddr3, 0);
+	gfar_write(&regs->gaddr4, 0);
+	gfar_write(&regs->gaddr5, 0);
+	gfar_write(&regs->gaddr6, 0);
+	gfar_write(&regs->gaddr7, 0);
 
 	/* Zero out the rmon mib registers if it has them */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
+		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
 
 		/* Mask off the CAM interrupts */
-		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
-		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+		gfar_write(&regs->rmon.cam1, 0xffffffff);
+		gfar_write(&regs->rmon.cam2, 0xffffffff);
 	}
 
 	/* Initialize the max receive buffer length */
-	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->mrblr, priv->rx_buffer_size);
 
 	/* Initialize the Minimum Frame Length Register */
-	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
+	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
 }
 
 
@@ -766,23 +1463,28 @@ static void init_registers(struct net_device *dev)
 static void gfar_halt_nodisable(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = NULL;
 	u32 tempval;
+	int i = 0;
 
-	/* Mask all interrupts */
-	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Mask all interrupts */
+		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
-	/* Clear all interrupts */
-	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+		/* Clear all interrupts */
+		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+	}
 
+	regs = priv->gfargrp[0].regs;
 	/* Stop the DMA, and wait for it to stop */
-	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval = gfar_read(&regs->dmactrl);
 	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
 	    != (DMACTRL_GRS | DMACTRL_GTS)) {
 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-		gfar_write(&priv->regs->dmactrl, tempval);
+		gfar_write(&regs->dmactrl, tempval);
 
-		while (!(gfar_read(&priv->regs->ievent) &
+		while (!(gfar_read(&regs->ievent) &
 			 (IEVENT_GRSC | IEVENT_GTSC)))
 			cpu_relax();
 	}
@@ -792,7 +1494,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
 void gfar_halt(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
 
 	gfar_halt_nodisable(dev);
@@ -803,101 +1505,131 @@ void gfar_halt(struct net_device *dev)
 	gfar_write(&regs->maccfg1, tempval);
 }
 
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+	free_irq(grp->interruptError, grp);
+	free_irq(grp->interruptTransmit, grp);
+	free_irq(grp->interruptReceive, grp);
+}
+
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
 	unsigned long flags;
+	int i;
 
 	phy_stop(priv->phydev);
 
+
 	/* Lock it down */
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+	lock_rx_qs(priv);
 
 	gfar_halt(dev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_rx_qs(priv);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		free_irq(priv->interruptError, dev);
-		free_irq(priv->interruptTransmit, dev);
-		free_irq(priv->interruptReceive, dev);
+		for (i = 0; i < priv->num_grps; i++)
+			free_grp_irqs(&priv->gfargrp[i]);
 	} else {
-		free_irq(priv->interruptTransmit, dev);
+		for (i = 0; i < priv->num_grps; i++)
+			free_irq(priv->gfargrp[i].interruptTransmit,
+					&priv->gfargrp[i]);
 	}
 
 	free_skb_resources(priv);
-
-	dma_free_coherent(&priv->ofdev->dev,
-			sizeof(struct txbd8)*priv->tx_ring_size
-			+ sizeof(struct rxbd8)*priv->rx_ring_size,
-			priv->tx_bd_base,
-			gfar_read(&regs->tbase0));
 }
 
-/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
-static void free_skb_resources(struct gfar_private *priv)
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 {
-	struct rxbd8 *rxbdp;
 	struct txbd8 *txbdp;
+	struct gfar_private *priv = netdev_priv(tx_queue->dev);
 	int i, j;
 
-	/* Go through all the buffer descriptors and free their data buffers */
-	txbdp = priv->tx_bd_base;
+	txbdp = tx_queue->tx_bd_base;
 
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		if (!priv->tx_skbuff[i])
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
 			continue;
 
 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
 				txbdp->length, DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+				j++) {
 			txbdp++;
 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
 					txbdp->length, DMA_TO_DEVICE);
 		}
 		txbdp++;
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
-		priv->tx_skbuff[i] = NULL;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
 	}
+	kfree(tx_queue->tx_skbuff);
+}
 
-	kfree(priv->tx_skbuff);
-
-	rxbdp = priv->rx_bd_base;
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+	struct rxbd8 *rxbdp;
+	struct gfar_private *priv = netdev_priv(rx_queue->dev);
+	int i;
 
-	/* rx_skbuff is not guaranteed to be allocated, so only
-	 * free it and its contents if it is allocated */
-	if(priv->rx_skbuff != NULL) {
-		for (i = 0; i < priv->rx_ring_size; i++) {
-			if (priv->rx_skbuff[i]) {
-				dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
-						priv->rx_buffer_size,
-						DMA_FROM_DEVICE);
+	rxbdp = rx_queue->rx_bd_base;
 
-				dev_kfree_skb_any(priv->rx_skbuff[i]);
-				priv->rx_skbuff[i] = NULL;
-			}
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		if (rx_queue->rx_skbuff[i]) {
+			dma_unmap_single(&priv->ofdev->dev,
+					rxbdp->bufPtr, priv->rx_buffer_size,
+					DMA_FROM_DEVICE);
+			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+			rx_queue->rx_skbuff[i] = NULL;
+		}
+		rxbdp->lstatus = 0;
+		rxbdp->bufPtr = 0;
+		rxbdp++;
+	}
+	kfree(rx_queue->rx_skbuff);
+}
 
-			rxbdp->lstatus = 0;
-			rxbdp->bufPtr = 0;
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff */
+static void free_skb_resources(struct gfar_private *priv)
+{
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
 
-			rxbdp++;
-		}
+	/* Go through all the buffer descriptors and free their data buffers */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		if (tx_queue->tx_skbuff)
+			free_skb_tx_queue(tx_queue);
+	}
 
-		kfree(priv->rx_skbuff);
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		if (rx_queue->rx_skbuff)
+			free_skb_rx_queue(rx_queue);
 	}
+
+	dma_free_coherent(&priv->ofdev->dev,
+			sizeof(struct txbd8) * priv->total_tx_ring_size +
+			sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			priv->tx_queue[0]->tx_bd_base,
+			priv->tx_queue[0]->tx_bd_dma_base);
 }
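free_skb_resources() now releases one coherent DMA region that holds every TX ring followed by every RX ring, using the totals accumulated over all queues; tx_queue[0] carries the base virtual and bus addresses for the whole block. A rough sizing sketch, where the 8-byte descriptor sizes and the 256-entry rings are assumptions for illustration (the real txbd8/rxbd8 layouts and DEFAULT_*_RING_SIZE live in gianfar.h):

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		size_t txbd_sz = 8, rxbd_sz = 8;	/* assumed txbd8/rxbd8 sizes */
		unsigned int num_tx_qs = 2, num_rx_qs = 2;
		unsigned int tx_ring = 256, rx_ring = 256;	/* assumed defaults */

		/* total_tx_ring_size / total_rx_ring_size sum the rings of all queues */
		size_t total = txbd_sz * tx_ring * num_tx_qs +
			       rxbd_sz * rx_ring * num_rx_qs;

		printf("one coherent region of %zu bytes backs all BD rings\n", total);
		return 0;
	}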
 
 void gfar_start(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
+	int i = 0;
 
 	/* Enable Rx and Tx in MACCFG1 */
 	tempval = gfar_read(&regs->maccfg1);
@@ -905,269 +1637,159 @@ void gfar_start(struct net_device *dev)
 	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize DMACTRL to have WWR and WOP */
-	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval = gfar_read(&regs->dmactrl);
 	tempval |= DMACTRL_INIT_SETTINGS;
-	gfar_write(&priv->regs->dmactrl, tempval);
+	gfar_write(&regs->dmactrl, tempval);
 
 	/* Make sure we aren't stopped */
-	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval = gfar_read(&regs->dmactrl);
 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&priv->regs->dmactrl, tempval);
-
-	/* Clear THLT/RHLT, so that the DMA starts polling now */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
-	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
-
-	/* Unmask the interrupts we look for */
-	gfar_write(&regs->imask, IMASK_DEFAULT);
+	gfar_write(&regs->dmactrl, tempval);
+
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear THLT/RHLT, so that the DMA starts polling now */
+		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+		/* Unmask the interrupts we look for */
+		gfar_write(&regs->imask, IMASK_DEFAULT);
+	}
 
 	dev->trans_start = jiffies;
 }
 
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *dev)
+void gfar_configure_coalescing(struct gfar_private *priv,
+	unsigned long tx_mask, unsigned long rx_mask)
 {
-	struct txbd8 *txbdp;
-	struct rxbd8 *rxbdp;
-	dma_addr_t addr = 0;
-	unsigned long vaddr;
-	int i;
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
-	int err = 0;
-	u32 rctrl = 0;
-	u32 tctrl = 0;
-	u32 attrs = 0;
-
-	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 __iomem *baddr;
+	int i = 0;
 
-	/* Allocate memory for the buffer descriptors */
-	vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev,
-			sizeof (struct txbd8) * priv->tx_ring_size +
-			sizeof (struct rxbd8) * priv->rx_ring_size,
-			&addr, GFP_KERNEL);
-
-	if (vaddr == 0) {
-		if (netif_msg_ifup(priv))
-			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
-					dev->name);
-		return -ENOMEM;
-	}
-
-	priv->tx_bd_base = (struct txbd8 *) vaddr;
-
-	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase0, addr);
-
-	/* Start the rx descriptor ring where the tx ring leaves off */
-	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
-	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
-	priv->rx_bd_base = (struct rxbd8 *) vaddr;
-	gfar_write(&regs->rbase0, addr);
-
-	/* Setup the skbuff rings */
-	priv->tx_skbuff =
-	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
-					priv->tx_ring_size, GFP_KERNEL);
-
-	if (NULL == priv->tx_skbuff) {
-		if (netif_msg_ifup(priv))
-			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
-					dev->name);
-		err = -ENOMEM;
-		goto tx_skb_fail;
-	}
-
-	for (i = 0; i < priv->tx_ring_size; i++)
-		priv->tx_skbuff[i] = NULL;
-
-	priv->rx_skbuff =
-	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
-					priv->rx_ring_size, GFP_KERNEL);
-
-	if (NULL == priv->rx_skbuff) {
-		if (netif_msg_ifup(priv))
-			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
-					dev->name);
-		err = -ENOMEM;
-		goto rx_skb_fail;
-	}
-
-	for (i = 0; i < priv->rx_ring_size; i++)
-		priv->rx_skbuff[i] = NULL;
-
-	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
-
-	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		txbdp->lstatus = 0;
-		txbdp->bufPtr = 0;
-		txbdp++;
-	}
-
-	/* Set the last descriptor in the ring to indicate wrap */
-	txbdp--;
-	txbdp->status |= TXBD_WRAP;
-
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb;
-
-		skb = gfar_new_skb(dev);
-
-		if (!skb) {
-			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
-					dev->name);
+	/* Backward compatible case -- even if we enable
+	 * multiple queues, there's only a single register to program
+	 */
+	gfar_write(&regs->txic, 0);
+	if (likely(priv->tx_queue[0]->txcoalescing))
+		gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 
-			goto err_rxalloc_fail;
+	gfar_write(&regs->rxic, 0);
+	if (unlikely(priv->rx_queue[0]->rxcoalescing))
+		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+
+	if (priv->mode == MQ_MG_MODE) {
+		baddr = &regs->txic0;
+		for_each_bit(i, &tx_mask, priv->num_tx_queues) {
+			if (likely(priv->tx_queue[i]->txcoalescing)) {
+				gfar_write(baddr + i, 0);
+				gfar_write(baddr + i, priv->tx_queue[i]->txic);
+			}
 		}
 
-		priv->rx_skbuff[i] = skb;
-
-		gfar_new_rxbdp(dev, rxbdp, skb);
-
-		rxbdp++;
+		baddr = &regs->rxic0;
+		for_each_bit(i, &rx_mask, priv->num_rx_queues) {
+			if (likely(priv->rx_queue[i]->rxcoalescing)) {
+				gfar_write(baddr + i, 0);
+				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+			}
+		}
 	}
+}
 
-	/* Set the last descriptor in the ring to wrap */
-	rxbdp--;
-	rxbdp->status |= RXBD_WRAP;
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+	struct gfar_private *priv = grp->priv;
+	struct net_device *dev = priv->ndev;
+	int err;
 
 	/* If the device has multiple interrupts, register for
 	 * them.  Otherwise, only register for the one */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
 		/* Install our interrupt handlers for Error,
 		 * Transmit, and Receive */
-		if (request_irq(priv->interruptError, gfar_error,
-				0, priv->int_name_er, dev) < 0) {
+		if ((err = request_irq(grp->interruptError, gfar_error, 0,
+				grp->int_name_er, grp)) < 0) {
 			if (netif_msg_intr(priv))
 				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptError);
+					dev->name, grp->interruptError);
 
-			err = -1;
-			goto err_irq_fail;
+			goto err_irq_fail;
 		}
 
-		if (request_irq(priv->interruptTransmit, gfar_transmit,
-				0, priv->int_name_tx, dev) < 0) {
+		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
+				0, grp->int_name_tx, grp)) < 0) {
 			if (netif_msg_intr(priv))
 				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptTransmit);
-
-			err = -1;
-
+					dev->name, grp->interruptTransmit);
 			goto tx_irq_fail;
 		}
 
-		if (request_irq(priv->interruptReceive, gfar_receive,
-				0, priv->int_name_rx, dev) < 0) {
+		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
+				grp->int_name_rx, grp)) < 0) {
 			if (netif_msg_intr(priv))
-				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
-						dev->name, priv->interruptReceive);
-
-			err = -1;
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, grp->interruptReceive);
 			goto rx_irq_fail;
 		}
 	} else {
-		if (request_irq(priv->interruptTransmit, gfar_interrupt,
-				0, priv->int_name_tx, dev) < 0) {
+		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
+				grp->int_name_tx, grp)) < 0) {
 			if (netif_msg_intr(priv))
 				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptTransmit);
-
-			err = -1;
+					dev->name, grp->interruptTransmit);
 			goto err_irq_fail;
 		}
 	}
 
-	phy_start(priv->phydev);
-
-	/* Configure the coalescing support */
-	gfar_write(&regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&regs->txic, priv->txic);
-
-	gfar_write(&regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&regs->rxic, priv->rxic);
-
-	if (priv->rx_csum_enable)
-		rctrl |= RCTRL_CHECKSUMMING;
+	return 0;
 
-	if (priv->extended_hash) {
-		rctrl |= RCTRL_EXTHASH;
+rx_irq_fail:
+	free_irq(grp->interruptTransmit, grp);
+tx_irq_fail:
+	free_irq(grp->interruptError, grp);
+err_irq_fail:
+	return err;
 
-		gfar_clear_exact_match(dev);
-		rctrl |= RCTRL_EMEN;
-	}
+}
 
-	if (priv->padding) {
-		rctrl &= ~RCTRL_PAL_MASK;
-		rctrl |= RCTRL_PADDING(priv->padding);
-	}
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar __iomem *regs = NULL;
+	int err, i, j;
 
-	/* keep vlan related bits if it's enabled */
-	if (priv->vlgrp) {
-		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
-		tctrl |= TCTRL_VLINS;
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 	}
 
-	/* Init rctrl based on our settings */
-	gfar_write(&priv->regs->rctrl, rctrl);
-
-	if (dev->features & NETIF_F_IP_CSUM)
-		tctrl |= TCTRL_INIT_CSUM;
-
-	gfar_write(&priv->regs->tctrl, tctrl);
-
-	/* Set the extraction length and index */
-	attrs = ATTRELI_EL(priv->rx_stash_size) |
-		ATTRELI_EI(priv->rx_stash_index);
-
-	gfar_write(&priv->regs->attreli, attrs);
-
-	/* Start with defaults, and add stashing or locking
-	 * depending on the approprate variables */
-	attrs = ATTR_INIT_SETTINGS;
+	regs = priv->gfargrp[0].regs;
+	err = gfar_alloc_skb_resources(ndev);
+	if (err)
+		return err;
 
-	if (priv->bd_stash_en)
-		attrs |= ATTR_BDSTASH;
+	gfar_init_mac(ndev);
 
-	if (priv->rx_stash_size != 0)
-		attrs |= ATTR_BUFSTASH;
+	for (i = 0; i < priv->num_grps; i++) {
+		err = register_grp_irqs(&priv->gfargrp[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				free_grp_irqs(&priv->gfargrp[j]);
+			goto irq_fail;
+		}
+	}
 
-	gfar_write(&priv->regs->attr, attrs);
+	/* Start the controller */
+	gfar_start(ndev);
 
-	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
-	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
-	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+	phy_start(priv->phydev);
 
-	/* Start the controller */
-	gfar_start(dev);
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
 	return 0;
 
-rx_irq_fail:
-	free_irq(priv->interruptTransmit, dev);
-tx_irq_fail:
-	free_irq(priv->interruptError, dev);
-err_irq_fail:
-err_rxalloc_fail:
-rx_skb_fail:
+irq_fail:
 	free_skb_resources(priv);
-tx_skb_fail:
-	dma_free_coherent(&priv->ofdev->dev,
-			sizeof(struct txbd8)*priv->tx_ring_size
-			+ sizeof(struct rxbd8)*priv->rx_ring_size,
-			priv->tx_bd_base,
-			gfar_read(&regs->tbase0));
-
 	return err;
 }
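startup_gfar() registers one IRQ set per group and, when group i fails, unwinds groups 0..i-1 before jumping to irq_fail; note that the goto runs after the unwind loop, not as part of it. The same partial-unwind pattern in a standalone sketch, with stub acquire()/release() functions standing in for register_grp_irqs()/free_grp_irqs():

	#include <stdio.h>

	#define NUM_GRPS 3

	/* Stubs standing in for register_grp_irqs() / free_grp_irqs(). */
	static int acquire(int i)
	{
		if (i == 2)		/* pretend the last group fails */
			return -1;
		printf("acquired group %d\n", i);
		return 0;
	}

	static void release(int i)
	{
		printf("released group %d\n", i);
	}

	int main(void)
	{
		int i, j, err = 0;

		for (i = 0; i < NUM_GRPS; i++) {
			err = acquire(i);
			if (err) {
				/* Unwind only the groups that were set up so far. */
				for (j = 0; j < i; j++)
					release(j);
				goto fail;
			}
		}
		printf("all groups up\n");
		return 0;

	fail:
		printf("startup failed: %d\n", err);
		return 1;
	}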
 
@@ -1178,7 +1800,7 @@ static int gfar_enet_open(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	napi_enable(&priv->napi);
+	enable_napi(priv);
 
 	skb_queue_head_init(&priv->rx_recycle);
 
@@ -1189,18 +1811,18 @@ static int gfar_enet_open(struct net_device *dev)
 
 	err = init_phy(dev);
 
-	if(err) {
-		napi_disable(&priv->napi);
+	if (err) {
+		disable_napi(priv);
 		return err;
 	}
 
 	err = startup_gfar(dev);
 	if (err) {
-		napi_disable(&priv->napi);
+		disable_napi(priv);
 		return err;
 	}
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
 
@@ -1269,15 +1891,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct netdev_queue *txq;
+	struct gfar __iomem *regs = NULL;
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
-	int i;
+	int i, rq = 0;
 	u32 bufaddr;
 	unsigned long flags;
 	unsigned int nr_frags, length;
 
-	base = priv->tx_bd_base;
+
+	rq = skb->queue_mapping;
+	tx_queue = priv->tx_queue[rq];
+	txq = netdev_get_tx_queue(dev, rq);
+	base = tx_queue->tx_bd_base;
+	regs = tx_queue->grp->regs;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1298,21 +1928,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > priv->num_txbdfree) {
+	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&priv->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	txbdp = txbdp_start = priv->cur_tx;
+	txbdp = txbdp_start = tx_queue->cur_tx;
 
 	if (nr_frags == 0) {
 		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1320,7 +1947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
 			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 			length = skb_shinfo(skb)->frags[i].size;
 
@@ -1362,13 +1989,27 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
-	priv->tx_skbuff[priv->skb_curtx] = skb;
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
 	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
 	/*
+	 * We can work in parallel with gfar_clean_tx_ring(), except
+	 * when modifying num_txbdfree. Note that we didn't grab the lock
+	 * when we were reading num_txbdfree and checking for available
+	 * space: that's because outside of this function it can only grow,
+	 * and once we've got the needed space, it cannot suddenly disappear.
+	 *
+	 * The lock also protects us from gfar_error(), which can modify
+	 * regs->tstat and thus retrigger the transfers, which is why we
+	 * must also grab the lock before setting the ready bit for the
+	 * first BD to be transmitted.
+	 */
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+
+	/*
 	 * The powerpc-specific eieio() is used, as wmb() has too strong
 	 * semantics (it requires synchronization between cacheable and
 	 * uncacheable mappings, which eieio doesn't provide and which we
@@ -1382,29 +2023,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Update the current skb pointer to the next entry we will use
 	 * (wrapping if necessary) */
-	priv->skb_curtx = (priv->skb_curtx + 1) &
-		TX_RING_MOD_MASK(priv->tx_ring_size);
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	priv->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_frags + 1);
 
 	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full.  We need to tell the kernel to stop sending us stuff. */
-	if (!priv->num_txbdfree) {
-		netif_stop_queue(dev);
+	if (!tx_queue->num_txbdfree) {
+		netif_tx_stop_queue(txq);
 
 		dev->stats.tx_fifo_errors++;
 	}
 
 	/* Tell the DMA to go go go */
-	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
 
 	/* Unlock priv */
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
 }
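With the per-queue state in place, gfar_start_xmit() still advances skb_curtx and cur_tx with TX_RING_MOD_MASK, i.e. a power-of-two ring size turned into a wrap mask. A small standalone illustration of that wrap arithmetic (the ring size below is only an example; the real default comes from gianfar.h):

	#include <stdio.h>

	#define TX_RING_SIZE		256	/* example; must be a power of two */
	#define TX_RING_MOD_MASK(sz)	((sz) - 1)

	int main(void)
	{
		unsigned int skb_curtx = TX_RING_SIZE - 1;	/* last slot in the ring */

		/* Advancing past the end wraps back to slot 0 */
		skb_curtx = (skb_curtx + 1) & TX_RING_MOD_MASK(TX_RING_SIZE);
		printf("next slot = %u\n", skb_curtx);		/* prints 0 */
		return 0;
	}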
@@ -1414,7 +2055,7 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	disable_napi(priv);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1424,7 +2065,7 @@ static int gfar_close(struct net_device *dev)
 	phy_disconnect(priv->phydev);
 	priv->phydev = NULL;
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	return 0;
 }
@@ -1443,50 +2084,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	regs = priv->gfargrp[0].regs;
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
 	priv->vlgrp = grp;
 
 	if (grp) {
 		/* Enable VLAN tag insertion */
-		tempval = gfar_read(&priv->regs->tctrl);
+		tempval = gfar_read(&regs->tctrl);
 		tempval |= TCTRL_VLINS;
 
-		gfar_write(&priv->regs->tctrl, tempval);
+		gfar_write(&regs->tctrl, tempval);
 
 		/* Enable VLAN tag extraction */
-		tempval = gfar_read(&priv->regs->rctrl);
+		tempval = gfar_read(&regs->rctrl);
 		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
-		gfar_write(&priv->regs->rctrl, tempval);
+		gfar_write(&regs->rctrl, tempval);
 	} else {
 		/* Disable VLAN tag insertion */
-		tempval = gfar_read(&priv->regs->tctrl);
+		tempval = gfar_read(&regs->tctrl);
 		tempval &= ~TCTRL_VLINS;
-		gfar_write(&priv->regs->tctrl, tempval);
+		gfar_write(&regs->tctrl, tempval);
 
 		/* Disable VLAN tag extraction */
-		tempval = gfar_read(&priv->regs->rctrl);
+		tempval = gfar_read(&regs->rctrl);
 		tempval &= ~RCTRL_VLEX;
 		/* If parse is no longer required, then disable parser */
 		if (tempval & RCTRL_REQ_PARSER)
 			tempval |= RCTRL_PRSDEP_INIT;
 		else
 			tempval &= ~RCTRL_PRSDEP_INIT;
-		gfar_write(&priv->regs->rctrl, tempval);
+		gfar_write(&regs->rctrl, tempval);
 	}
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
 	int tempsize, tempval;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int oldsize = priv->rx_buffer_size;
 	int frame_size = new_mtu + ETH_HLEN;
 
@@ -1518,20 +2164,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 
 	dev->mtu = new_mtu;
 
-	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
-	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+	gfar_write(&regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
 
 	/* If the mtu is larger than the max size for standard
 	 * ethernet frames (ie, a jumbo frame), then set maccfg2
 	 * to allow huge frames, and to check the length */
-	tempval = gfar_read(&priv->regs->maccfg2);
+	tempval = gfar_read(&regs->maccfg2);
 
 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 	else
 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 
-	gfar_write(&priv->regs->maccfg2, tempval);
+	gfar_write(&regs->maccfg2, tempval);
 
 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
 		startup_gfar(dev);
@@ -1551,10 +2197,10 @@ static void gfar_reset_task(struct work_struct *work)
 	struct net_device *dev = priv->ndev;
 
 	if (dev->flags & IFF_UP) {
-		netif_stop_queue(dev);
+		netif_tx_stop_all_queues(dev);
 		stop_gfar(dev);
 		startup_gfar(dev);
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	netif_tx_schedule_all(dev);
@@ -1569,24 +2215,29 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue[tx_queue->qindex];
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
+
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+		unsigned long flags;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -1618,82 +2269,73 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
 		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		tx_queue->num_txbdfree += frags + 1;
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
-		netif_wake_queue(dev);
+	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
+		netif_wake_subqueue(dev, tx_queue->qindex);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
 	return howmany;
 }
 
-static void gfar_schedule_cleanup(struct net_device *dev)
+static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
 {
-	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
-
-	if (napi_schedule_prep(&priv->napi)) {
-		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+	spin_lock_irqsave(&gfargrp->grplock, flags);
+	if (napi_schedule_prep(&gfargrp->napi)) {
+		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+		__napi_schedule(&gfargrp->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
 		 * because of the packets that have already arrived.
 		 */
-		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
 	}
+	spin_unlock_irqrestore(&gfargrp->grplock, flags);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
 {
-	gfar_schedule_cleanup((struct net_device *)dev_id);
+	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 lstatus;
-
-	bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
-			priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
-		lstatus |= BD_LFLAG(RXBD_WRAP);
-
-	eieio();
+	dma_addr_t buf;
 
-	bdp->lstatus = lstatus;
+	buf = dma_map_single(&priv->ofdev->dev, skb->data,
+			     priv->rx_buffer_size, DMA_FROM_DEVICE);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
 
@@ -1760,9 +2402,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 	}
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id)
+irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-	gfar_schedule_cleanup((struct net_device *)dev_id);
+	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
 	return IRQ_HANDLED;
 }
 
@@ -1792,6 +2434,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 	fcb = (struct rxfcb *)skb->data;
 
 	/* Remove the FCB from the skb */
+	skb_set_queue_mapping(skb, fcb->rq);
 	/* Remove the padded bytes, if there are any */
 	if (amount_pull)
 		skb_pull(skb, amount_pull);
@@ -1818,8 +2461,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  *   until the budget/quota has been reached. Returns the number
  *   of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1828,8 +2472,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
@@ -1841,7 +2485,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1875,8 +2519,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 				skb_put(skb, pkt_len);
 				dev->stats.rx_bytes += pkt_len;
 
-				if (in_irq() || irqs_disabled())
-					printk("Interrupt problem!\n");
 				gfar_process_frame(dev, skb, amount_pull);
 
 			} else {
@@ -1889,46 +2531,70 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-		    (priv->skb_currx + 1) &
-		    RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+		    (rx_queue->skb_currx + 1) &
+		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
-	int tx_cleaned = 0;
-	int rx_cleaned = 0;
-	unsigned long flags;
+	struct gfar_priv_grp *gfargrp = container_of(napi,
+			struct gfar_priv_grp, napi);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
+	int tx_cleaned = 0, i, left_over_budget = budget;
+	unsigned long serviced_queues = 0;
+	int num_queues = 0;
+
+	num_queues = gfargrp->num_rx_queues;
+	budget_per_queue = budget/num_queues;
 
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
-	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
-	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+
+	while (num_queues && left_over_budget) {
+
+		budget_per_queue = left_over_budget/num_queues;
+		left_over_budget = 0;
+
+		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+			if (test_bit(i, &serviced_queues))
+				continue;
+			rx_queue = priv->rx_queue[i];
+			tx_queue = priv->tx_queue[rx_queue->qindex];
+
+			tx_cleaned += gfar_clean_tx_ring(tx_queue);
+			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
+							budget_per_queue);
+			rx_cleaned += rx_cleaned_per_queue;
+			if (rx_cleaned_per_queue < budget_per_queue) {
+				left_over_budget = left_over_budget +
+					(budget_per_queue - rx_cleaned_per_queue);
+				set_bit(i, &serviced_queues);
+				num_queues--;
+			}
+		}
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
-
 	if (tx_cleaned)
 		return budget;
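The rewritten gfar_poll() above no longer hands the whole NAPI budget to a single ring: it walks the group's rx_bit_map, gives every not-yet-serviced queue an equal share of the remaining budget, and feeds whatever an idle queue leaves unused back into left_over_budget for the next pass. A minimal standalone model of that redistribution (plain C with made-up per-queue backlogs; not driver code) behaves the same way:

/* Standalone model of the per-group budget redistribution in gfar_poll().
 * The queue backlogs are invented; only the redistribution logic mirrors
 * the loop above.
 */
#include <stdio.h>

#define NQ 4

int main(void)
{
	int backlog[NQ] = { 1, 9, 3, 20 };	/* frames waiting per RX queue */
	int cleaned[NQ] = { 0 };
	int serviced[NQ] = { 0 };
	int budget = 16;
	int left_over_budget = budget;
	int num_queues = NQ;
	int i;

	while (num_queues && left_over_budget) {
		int budget_per_queue = left_over_budget / num_queues;

		left_over_budget = 0;
		for (i = 0; i < NQ; i++) {
			int done;

			if (serviced[i])
				continue;
			/* stand-in for gfar_clean_rx_ring(queue, budget) */
			done = backlog[i] < budget_per_queue ?
					backlog[i] : budget_per_queue;
			backlog[i] -= done;
			cleaned[i] += done;
			if (done < budget_per_queue) {
				/* idle queue: hand its unused share back */
				left_over_budget += budget_per_queue - done;
				serviced[i] = 1;
				num_queues--;
			}
		}
	}

	for (i = 0; i < NQ; i++)
		printf("queue %d: cleaned %d, backlog left %d\n",
		       i, cleaned[i], backlog[i]);
	return 0;
}

With a budget of 16 and the backlogs above, the lightly loaded queues give their leftover share to the busy ones on the second pass, so the full budget is spent exactly as in the driver loop.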
 
@@ -1936,20 +2602,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 
 		/* Clear the halt bit in RSTAT */
-		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+		gfar_write(&regs->rstat, gfargrp->rstat);
 
-		gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+		gfar_write(&regs->imask, IMASK_DEFAULT);
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
-			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
-		}
-		if (likely(priv->txcoalescing)) {
-			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
-		}
+		gfar_configure_coalescing(priv,
+				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
 	}
 
 	return rx_cleaned;
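The napi_complete() path now calls gfar_configure_coalescing() instead of writing rxic/txic directly; the function body lives elsewhere in gianfar.c and is not part of this hunk. A plausible sketch, assuming single-group (SQ_SG_MODE) devices keep using the lone rxic/txic pair while multi-queue devices program the per-ring rxic0..rxic7/txic0..txic7 registers added to struct gfar further down, could look like this (the _sketch suffix marks it as a reconstruction, not the actual implementation):

/* Hypothetical sketch of gfar_configure_coalescing(); the real body is
 * outside this diff.  Assumes one coalescing register per ring in
 * MQ_MG_MODE, selected by the tx/rx bit masks passed by the caller.
 */
static void gfar_configure_coalescing_sketch(struct gfar_private *priv,
		unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	if (priv->mode == SQ_SG_MODE) {
		/* backward compatible case: a single rxic/txic pair */
		gfar_write(&regs->txic, 0);
		if (priv->tx_queue[0]->txcoalescing)
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (priv->rx_queue[0]->rxcoalescing)
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
		return;
	}

	/* multi-queue: program the per-ring registers for the given masks */
	baddr = &regs->txic0;
	for_each_bit(i, &tx_mask, priv->num_tx_queues) {
		gfar_write(baddr + i, 0);
		if (priv->tx_queue[i]->txcoalescing)
			gfar_write(baddr + i, priv->tx_queue[i]->txic);
	}

	baddr = &regs->rxic0;
	for_each_bit(i, &rx_mask, priv->num_rx_queues) {
		gfar_write(baddr + i, 0);
		if (priv->rx_queue[i]->rxcoalescing)
			gfar_write(baddr + i, priv->rx_queue[i]->rxic);
	}
}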
@@ -1964,44 +2624,49 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 static void gfar_netpoll(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	int i = 0;
 
 	/* If the device has multiple interrupts, run tx/rx */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		disable_irq(priv->interruptTransmit);
-		disable_irq(priv->interruptReceive);
-		disable_irq(priv->interruptError);
-		gfar_interrupt(priv->interruptTransmit, dev);
-		enable_irq(priv->interruptError);
-		enable_irq(priv->interruptReceive);
-		enable_irq(priv->interruptTransmit);
+		for (i = 0; i < priv->num_grps; i++) {
+			disable_irq(priv->gfargrp[i].interruptTransmit);
+			disable_irq(priv->gfargrp[i].interruptReceive);
+			disable_irq(priv->gfargrp[i].interruptError);
+			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+						&priv->gfargrp[i]);
+			enable_irq(priv->gfargrp[i].interruptError);
+			enable_irq(priv->gfargrp[i].interruptReceive);
+			enable_irq(priv->gfargrp[i].interruptTransmit);
+		}
 	} else {
-		disable_irq(priv->interruptTransmit);
-		gfar_interrupt(priv->interruptTransmit, dev);
-		enable_irq(priv->interruptTransmit);
+		for (i = 0; i < priv->num_grps; i++) {
+			disable_irq(priv->gfargrp[i].interruptTransmit);
+			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+						&priv->gfargrp[i]);
+			enable_irq(priv->gfargrp[i].interruptTransmit);
+		}
 	}
 }
 #endif
 
 /* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *dev_id)
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 {
-	struct net_device *dev = dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_grp *gfargrp = grp_id;
 
 	/* Save ievent for future reference */
-	u32 events = gfar_read(&priv->regs->ievent);
+	u32 events = gfar_read(&gfargrp->regs->ievent);
 
 	/* Check for reception */
 	if (events & IEVENT_RX_MASK)
-		gfar_receive(irq, dev_id);
+		gfar_receive(irq, grp_id);
 
 	/* Check for transmit completion */
 	if (events & IEVENT_TX_MASK)
-		gfar_transmit(irq, dev_id);
+		gfar_transmit(irq, grp_id);
 
 	/* Check for errors */
 	if (events & IEVENT_ERR_MASK)
-		gfar_error(irq, dev_id);
+		gfar_error(irq, grp_id);
 
 	return IRQ_HANDLED;
 }
@@ -2015,12 +2680,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2752,8 @@ static void adjust_link(struct net_device *dev)
 
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
-
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2097,10 +2764,10 @@ static void gfar_set_multi(struct net_device *dev)
 {
 	struct dev_mc_list *mc_ptr;
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2112,7 +2779,7 @@ static void gfar_set_multi(struct net_device *dev)
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,7 +2831,7 @@ static void gfar_set_multi(struct net_device *dev)
 			em_num = 0;
 		}
 
-		if(dev->mc_count == 0)
+		if (dev->mc_count == 0)
 			return;
 
 		/* Parse the list, and set the appropriate bits */
@@ -2230,10 +2897,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int idx;
 	char tmpbuf[MAC_ADDR_LEN];
 	u32 tempval;
-	u32 __iomem *macptr = &priv->regs->macstnaddr1;
+	u32 __iomem *macptr = &regs->macstnaddr1;
 
 	macptr += num*2;
 
@@ -2250,16 +2918,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 }
 
 /* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *dev_id)
+static irqreturn_t gfar_error(int irq, void *grp_id)
 {
-	struct net_device *dev = dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_grp *gfargrp = grp_id;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_private *priv = gfargrp->priv;
+	struct net_device *dev = priv->ndev;
 
 	/* Save ievent for future reference */
-	u32 events = gfar_read(&priv->regs->ievent);
+	u32 events = gfar_read(&regs->ievent);
 
 	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
+	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
 
 	/* Magic Packet is not an error. */
 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +2939,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
 	/* Hmm... */
 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
 		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-		       dev->name, events, gfar_read(&priv->regs->imask));
+		       dev->name, events, gfar_read(&regs->imask));
 
 	/* Update the error counters */
 	if (events & IEVENT_TXE) {
@@ -2280,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
 		if (events & IEVENT_CRL)
 			dev->stats.tx_aborted_errors++;
 		if (events & IEVENT_XFUN) {
+			unsigned long flags;
+
 			if (netif_msg_tx_err(priv))
 				printk(KERN_DEBUG "%s: TX FIFO underrun, "
 				       "packet dropped.\n", dev->name);
 			dev->stats.tx_dropped++;
 			priv->extra_stats.tx_underrun++;
 
+			local_irq_save(flags);
+			lock_tx_qs(priv);
+
 			/* Reactivate the Tx Queues */
-			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+			gfar_write(&regs->tstat, gfargrp->tstat);
+
+			unlock_tx_qs(priv);
+			local_irq_restore(flags);
 		}
 		if (netif_msg_tx_err(priv))
 			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +2974,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
 		dev->stats.rx_errors++;
 		priv->extra_stats.rx_bsy++;
 
-		gfar_receive(irq, dev_id);
+		gfar_receive(irq, grp_id);
 
 		if (netif_msg_rx_err(priv))
 			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
-			       dev->name, gfar_read(&priv->regs->rstat));
+			       dev->name, gfar_read(&regs->rstat));
 	}
 	if (events & IEVENT_BABR) {
 		dev->stats.rx_errors++;
@@ -2331,6 +3009,9 @@ static struct of_device_id gfar_match[] =
 		.type = "network",
 		.compatible = "gianfar",
 	},
+	{
+		.compatible = "fsl,etsec2",
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, gfar_match);
@@ -2342,8 +3023,9 @@ static struct of_platform_driver gfar_driver = {
 
 	.probe = gfar_probe,
 	.remove = gfar_remove,
-	.suspend = gfar_suspend,
-	.resume = gfar_resume,
+	.suspend = gfar_legacy_suspend,
+	.resume = gfar_legacy_resume,
+	.driver.pm = GFAR_PM_OPS,
 };
 
 static int __init gfar_init(void)
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 2cd94338b5d3..cbb451011cb5 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -74,6 +75,13 @@
 extern const char gfar_driver_name[];
 extern const char gfar_driver_version[];
 
+/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
+#define MAX_TX_QS	0x8
+#define MAX_RX_QS	0x8
+
+/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
+#define MAXGROUPS 0x2
+
 /* These need to be powers of 2 for this driver */
 #define DEFAULT_TX_RING_SIZE	256
 #define DEFAULT_RX_RING_SIZE	256
@@ -171,12 +179,63 @@ extern const char gfar_driver_version[];
 
 #define MINFLR_INIT_SETTINGS	0x00000040
 
+/* Tqueue control */
+#define TQUEUE_EN0		0x00008000
+#define TQUEUE_EN1		0x00004000
+#define TQUEUE_EN2		0x00002000
+#define TQUEUE_EN3		0x00001000
+#define TQUEUE_EN4		0x00000800
+#define TQUEUE_EN5		0x00000400
+#define TQUEUE_EN6		0x00000200
+#define TQUEUE_EN7		0x00000100
+#define TQUEUE_EN_ALL		0x0000FF00
+
+#define TR03WT_WT0_MASK		0xFF000000
+#define TR03WT_WT1_MASK		0x00FF0000
+#define TR03WT_WT2_MASK		0x0000FF00
+#define TR03WT_WT3_MASK		0x000000FF
+
+#define TR47WT_WT4_MASK		0xFF000000
+#define TR47WT_WT5_MASK		0x00FF0000
+#define TR47WT_WT6_MASK		0x0000FF00
+#define TR47WT_WT7_MASK		0x000000FF
+
+/* Rqueue control */
+#define RQUEUE_EX0		0x00800000
+#define RQUEUE_EX1		0x00400000
+#define RQUEUE_EX2		0x00200000
+#define RQUEUE_EX3		0x00100000
+#define RQUEUE_EX4		0x00080000
+#define RQUEUE_EX5		0x00040000
+#define RQUEUE_EX6		0x00020000
+#define RQUEUE_EX7		0x00010000
+#define RQUEUE_EX_ALL		0x00FF0000
+
+#define RQUEUE_EN0		0x00000080
+#define RQUEUE_EN1		0x00000040
+#define RQUEUE_EN2		0x00000020
+#define RQUEUE_EN3		0x00000010
+#define RQUEUE_EN4		0x00000008
+#define RQUEUE_EN5		0x00000004
+#define RQUEUE_EN6		0x00000002
+#define RQUEUE_EN7		0x00000001
+#define RQUEUE_EN_ALL		0x000000FF
+
 /* Init to do tx snooping for buffers and descriptors */
 #define DMACTRL_INIT_SETTINGS   0x000000c3
 #define DMACTRL_GRS             0x00000010
 #define DMACTRL_GTS             0x00000008
 
-#define TSTAT_CLEAR_THALT       0x80000000
+#define TSTAT_CLEAR_THALT_ALL	0xFF000000
+#define TSTAT_CLEAR_THALT	0x80000000
+#define TSTAT_CLEAR_THALT0	0x80000000
+#define TSTAT_CLEAR_THALT1	0x40000000
+#define TSTAT_CLEAR_THALT2	0x20000000
+#define TSTAT_CLEAR_THALT3	0x10000000
+#define TSTAT_CLEAR_THALT4	0x08000000
+#define TSTAT_CLEAR_THALT5	0x04000000
+#define TSTAT_CLEAR_THALT6	0x02000000
+#define TSTAT_CLEAR_THALT7	0x01000000
 
 /* Interrupt coalescing macros */
 #define IC_ICEN			0x80000000
@@ -227,6 +286,13 @@ extern const char gfar_driver_version[];
 #define TCTRL_IPCSEN		0x00004000
 #define TCTRL_TUCSEN		0x00002000
 #define TCTRL_VLINS		0x00001000
+#define TCTRL_THDF		0x00000800
+#define TCTRL_RFCPAUSE		0x00000010
+#define TCTRL_TFCPAUSE		0x00000008
+#define TCTRL_TXSCHED_MASK	0x00000006
+#define TCTRL_TXSCHED_INIT	0x00000000
+#define TCTRL_TXSCHED_PRIO	0x00000002
+#define TCTRL_TXSCHED_WRRS	0x00000004
 #define TCTRL_INIT_CSUM		(TCTRL_TUCSEN | TCTRL_IPCSEN)
 
 #define IEVENT_INIT_CLEAR	0xffffffff
@@ -315,6 +381,84 @@ extern const char gfar_driver_version[];
 #define BD_LFLAG(flags) ((flags) << 16)
 #define BD_LENGTH_MASK		0x0000ffff
 
+#define CLASS_CODE_UNRECOG		0x00
+#define CLASS_CODE_DUMMY1		0x01
+#define CLASS_CODE_ETHERTYPE1		0x02
+#define CLASS_CODE_ETHERTYPE2		0x03
+#define CLASS_CODE_USER_PROG1		0x04
+#define CLASS_CODE_USER_PROG2		0x05
+#define CLASS_CODE_USER_PROG3		0x06
+#define CLASS_CODE_USER_PROG4		0x07
+#define CLASS_CODE_TCP_IPV4		0x08
+#define CLASS_CODE_UDP_IPV4		0x09
+#define CLASS_CODE_AH_ESP_IPV4		0x0a
+#define CLASS_CODE_SCTP_IPV4		0x0b
+#define CLASS_CODE_TCP_IPV6		0x0c
+#define CLASS_CODE_UDP_IPV6		0x0d
+#define CLASS_CODE_AH_ESP_IPV6		0x0e
+#define CLASS_CODE_SCTP_IPV6		0x0f
+
+#define FPR_FILER_MASK	0xFFFFFFFF
+#define MAX_FILER_IDX	0xFF
+
+/* RQFCR register bits */
+#define RQFCR_GPI		0x80000000
+#define RQFCR_HASHTBL_Q		0x00000000
+#define RQFCR_HASHTBL_0		0x00020000
+#define RQFCR_HASHTBL_1		0x00040000
+#define RQFCR_HASHTBL_2		0x00060000
+#define RQFCR_HASHTBL_3		0x00080000
+#define RQFCR_HASH		0x00010000
+#define RQFCR_CLE		0x00000200
+#define RQFCR_RJE		0x00000100
+#define RQFCR_AND		0x00000080
+#define RQFCR_CMP_EXACT		0x00000000
+#define RQFCR_CMP_MATCH		0x00000020
+#define RQFCR_CMP_NOEXACT	0x00000040
+#define RQFCR_CMP_NOMATCH	0x00000060
+
+/* RQFCR PID values */
+#define	RQFCR_PID_MASK		0x00000000
+#define	RQFCR_PID_PARSE		0x00000001
+#define	RQFCR_PID_ARB		0x00000002
+#define	RQFCR_PID_DAH		0x00000003
+#define	RQFCR_PID_DAL		0x00000004
+#define	RQFCR_PID_SAH		0x00000005
+#define	RQFCR_PID_SAL		0x00000006
+#define	RQFCR_PID_ETY		0x00000007
+#define	RQFCR_PID_VID		0x00000008
+#define	RQFCR_PID_PRI		0x00000009
+#define	RQFCR_PID_TOS		0x0000000A
+#define	RQFCR_PID_L4P		0x0000000B
+#define	RQFCR_PID_DIA		0x0000000C
+#define	RQFCR_PID_SIA		0x0000000D
+#define	RQFCR_PID_DPT		0x0000000E
+#define	RQFCR_PID_SPT		0x0000000F
+
+/* RQFPR when PID is 0x0001 */
+#define RQFPR_HDR_GE_512	0x00200000
+#define RQFPR_LERR		0x00100000
+#define RQFPR_RAR		0x00080000
+#define RQFPR_RARQ		0x00040000
+#define RQFPR_AR		0x00020000
+#define RQFPR_ARQ		0x00010000
+#define RQFPR_EBC		0x00008000
+#define RQFPR_VLN		0x00004000
+#define RQFPR_CFI		0x00002000
+#define RQFPR_JUM		0x00001000
+#define RQFPR_IPF		0x00000800
+#define RQFPR_FIF		0x00000400
+#define RQFPR_IPV4		0x00000200
+#define RQFPR_IPV6		0x00000100
+#define RQFPR_ICC		0x00000080
+#define RQFPR_ICV		0x00000040
+#define RQFPR_TCP		0x00000020
+#define RQFPR_UDP		0x00000010
+#define RQFPR_TUC		0x00000008
+#define RQFPR_TUV		0x00000004
+#define RQFPR_PER		0x00000002
+#define RQFPR_EER		0x00000001
+
 /* TxBD status field bits */
 #define TXBD_READY		0x8000
 #define TXBD_PADCRC		0x4000
@@ -503,25 +647,32 @@ struct gfar_stats {
 
 struct gfar {
 	u32	tsec_id;	/* 0x.000 - Controller ID register */
-	u8	res1[12];
+	u32	tsec_id2;	/* 0x.004 - Controller ID2 register */
+	u8	res1[8];
 	u32	ievent;		/* 0x.010 - Interrupt Event Register */
 	u32	imask;		/* 0x.014 - Interrupt Mask Register */
 	u32	edis;		/* 0x.018 - Error Disabled Register */
-	u8	res2[4];
+	u32	emapg;		/* 0x.01c - Group Error mapping register */
 	u32	ecntrl;		/* 0x.020 - Ethernet Control Register */
 	u32	minflr;		/* 0x.024 - Minimum Frame Length Register */
 	u32	ptv;		/* 0x.028 - Pause Time Value Register */
 	u32	dmactrl;	/* 0x.02c - DMA Control Register */
 	u32	tbipa;		/* 0x.030 - TBI PHY Address Register */
-	u8	res3[88];
+	u8	res2[28];
+	u32	fifo_rx_pause;	/* 0x.050 - FIFO receive pause start threshold
+					register */
+	u32	fifo_rx_pause_shutoff;	/* 0x.054 - FIFO receive starve shutoff
+						register */
+	u32	fifo_rx_alarm;	/* 0x.058 - FIFO receive alarm start threshold
+						register */
+	u32	fifo_rx_alarm_shutoff;	/* 0x.05c - FIFO receive alarm starve
+						shutoff register */
+	u8	res3[44];
 	u32	fifo_tx_thr;	/* 0x.08c - FIFO transmit threshold register */
 	u8	res4[8];
 	u32	fifo_tx_starve;	/* 0x.098 - FIFO transmit starve register */
 	u32	fifo_tx_starve_shutoff;	/* 0x.09c - FIFO transmit starve shutoff register */
-	u8	res5[4];
-	u32	fifo_rx_pause;	/* 0x.0a4 - FIFO receive pause threshold register */
-	u32	fifo_rx_alarm;	/* 0x.0a8 - FIFO receive alarm threshold register */
-	u8	res6[84];
+	u8	res5[96];
 	u32	tctrl;		/* 0x.100 - Transmit Control Register */
 	u32	tstat;		/* 0x.104 - Transmit Status Register */
 	u32	dfvlan;		/* 0x.108 - Default VLAN Control word */
@@ -572,7 +723,11 @@ struct gfar {
 	u8	res12[8];
 	u32	rxic;		/* 0x.310 - Receive Interrupt Coalescing Configuration Register */
 	u32	rqueue;		/* 0x.314 - Receive queue control register */
-	u8	res13[24];
+	u32	rir0;		/* 0x.318 - Ring mapping register 0 */
+	u32	rir1;		/* 0x.31c - Ring mapping register 1 */
+	u32	rir2;		/* 0x.320 - Ring mapping register 2 */
+	u32	rir3;		/* 0x.324 - Ring mapping register 3 */
+	u8	res13[8];
 	u32	rbifx;		/* 0x.330 - Receive bit field extract control register */
 	u32	rqfar;		/* 0x.334 - Receive queue filing table address register */
 	u32	rqfcr;		/* 0x.338 - Receive queue filing table control register */
@@ -621,7 +776,7 @@ struct gfar {
 	u32	maxfrm;		/* 0x.510 - Maximum Frame Length Register */
 	u8	res18[12];
 	u8	gfar_mii_regs[24];	/* See gianfar_phy.h */
-	u8	res19[4];
+	u32	ifctrl;		/* 0x.538 - Interface control register */
 	u32	ifstat;		/* 0x.53c - Interface Status Register */
 	u32	macstnaddr1;	/* 0x.540 - Station Address Part 1 Register */
 	u32	macstnaddr2;	/* 0x.544 - Station Address Part 2 Register */
@@ -682,8 +837,30 @@ struct gfar {
 	u8	res23c[248];
 	u32	attr;		/* 0x.bf8 - Attributes Register */
 	u32	attreli;	/* 0x.bfc - Attributes Extract Length and Extract Index Register */
-	u8	res24[1024];
-
+	u8	res24[688];
+	u32	isrg0;		/* 0x.eb0 - Interrupt steering group 0 register */
+	u32	isrg1;		/* 0x.eb4 - Interrupt steering group 1 register */
+	u32	isrg2;		/* 0x.eb8 - Interrupt steering group 2 register */
+	u32	isrg3;		/* 0x.ebc - Interrupt steering group 3 register */
+	u8	res25[16];
+	u32	rxic0;		/* 0x.ed0 - Ring 0 Rx interrupt coalescing */
+	u32	rxic1;		/* 0x.ed4 - Ring 1 Rx interrupt coalescing */
+	u32	rxic2;		/* 0x.ed8 - Ring 2 Rx interrupt coalescing */
+	u32	rxic3;		/* 0x.edc - Ring 3 Rx interrupt coalescing */
+	u32	rxic4;		/* 0x.ee0 - Ring 4 Rx interrupt coalescing */
+	u32	rxic5;		/* 0x.ee4 - Ring 5 Rx interrupt coalescing */
+	u32	rxic6;		/* 0x.ee8 - Ring 6 Rx interrupt coalescing */
+	u32	rxic7;		/* 0x.eec - Ring 7 Rx interrupt coalescing */
+	u8	res26[32];
+	u32	txic0;		/* 0x.f10 - Ring 0 Tx interrupt coalescing */
+	u32	txic1;		/* 0x.f14 - Ring 1 Tx interrupt coalescing */
+	u32	txic2;		/* 0x.f18 - Ring 2 Tx interrupt coalescing */
+	u32	txic3;		/* 0x.f1c - Ring 3 Tx interrupt coalescing */
+	u32	txic4;		/* 0x.f20 - Ring 4 Tx interrupt coalescing */
+	u32	txic5;		/* 0x.f24 - Ring 5 Tx interrupt coalescing */
+	u32	txic6;		/* 0x.f28 - Ring 6 Tx interrupt coalescing */
+	u32	txic7;		/* 0x.f2c - Ring 7 Tx interrupt coalescing */
+	u8	res27[208];
 };
 
 /* Flags related to gianfar device features */
@@ -699,6 +876,133 @@ struct gfar {
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING		0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 
+#if (MAXGROUPS == 2)
+#define DEFAULT_MAPPING 	0xAA
+#else
+#define DEFAULT_MAPPING 	0xFF
+#endif
+
+#define ISRG_SHIFT_TX	0x10
+#define ISRG_SHIFT_RX	0x18
+
+/* The same driver can operate in two modes */
+/* SQ_SG_MODE: Single Queue Single Group Mode
+ * 		(Backward compatible mode)
+ * MQ_MG_MODE: Multi Queue Multi Group mode
+ */
+enum {
+	SQ_SG_MODE = 0,
+	MQ_MG_MODE
+};
+
+/**
+ *	struct gfar_priv_tx_q - per tx queue structure
+ *	@txlock: per queue tx spin lock
+ *	@tx_skbuff: skb pointers
+ *	@skb_curtx: to be used skb pointer
+ *	@skb_dirtytx: the last used skb pointer
+ *	@qindex: index of this queue
+ *	@dev: back pointer to the dev structure
+ *	@grp: back pointer to the group to which this queue belongs
+ *	@tx_bd_base: First tx buffer descriptor
+ *	@cur_tx: Next free ring entry
+ *	@dirty_tx: First buffer in line to be transmitted
+ *	@tx_ring_size: Tx ring size
+ *	@num_txbdfree: number of free TxBds
+ *	@txcoalescing: enable/disable tx coalescing
+ *	@txic: transmit interrupt coalescing value
+ *	@txcount: coalescing value if based on tx frame count
+ *	@txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+	spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct sk_buff ** tx_skbuff;
+	/* Buffer descriptor pointers */
+	dma_addr_t tx_bd_dma_base;
+	struct	txbd8 *tx_bd_base;
+	struct	txbd8 *cur_tx;
+	struct	txbd8 *dirty_tx;
+	struct	net_device *dev;
+	struct gfar_priv_grp *grp;
+	u16	skb_curtx;
+	u16	skb_dirtytx;
+	u16	qindex;
+	unsigned int tx_ring_size;
+	unsigned int num_txbdfree;
+	/* Configuration info for the coalescing features */
+	unsigned char txcoalescing;
+	unsigned long txic;
+	unsigned short txcount;
+	unsigned short txtime;
+};
+
+/**
+ *	struct gfar_priv_rx_q - per rx queue structure
+ *	@rxlock: per queue rx spin lock
+ *	@rx_skbuff: skb pointers
+ *	@skb_currx: currently used skb pointer
+ *	@rx_bd_base: First rx buffer descriptor
+ *	@cur_rx: Next free rx ring entry
+ *	@qindex: index of this queue
+ *	@dev: back pointer to the dev structure
+ *	@rx_ring_size: Rx ring size
+ *	@rxcoalescing: enable/disable rx-coalescing
+ *	@rxic: receive interrupt coalescing value
+ */
+
+struct gfar_priv_rx_q {
+	spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct	sk_buff ** rx_skbuff;
+	dma_addr_t rx_bd_dma_base;
+	struct	rxbd8 *rx_bd_base;
+	struct	rxbd8 *cur_rx;
+	struct	net_device *dev;
+	struct gfar_priv_grp *grp;
+	u16	skb_currx;
+	u16	qindex;
+	unsigned int	rx_ring_size;
+	/* RX Coalescing values */
+	unsigned char rxcoalescing;
+	unsigned long rxic;
+};
+
+/**
+ *	struct gfar_priv_grp - per group structure
+ *	@napi: the napi struct for this group
+ *	@priv: back pointer to the priv structure
+ *	@regs: the ioremapped register space for this group
+ *	@grp_id: group id for this group
+ *	@interruptTransmit: The TX interrupt number for this group
+ *	@interruptReceive: The RX interrupt number for this group
+ *	@interruptError: The ERROR interrupt number for this group
+ *	@int_name_tx: tx interrupt name for this group
+ *	@int_name_rx: rx interrupt name for this group
+ *	@int_name_er: er interrupt name for this group
+ */
+
+struct gfar_priv_grp {
+	spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct	napi_struct napi;
+	struct gfar_private *priv;
+	struct gfar __iomem *regs;
+	unsigned int grp_id;
+	unsigned long rx_bit_map;
+	unsigned long tx_bit_map;
+	unsigned long num_tx_queues;
+	unsigned long num_rx_queues;
+	unsigned int rstat;
+	unsigned int tstat;
+	unsigned int imask;
+	unsigned int ievent;
+	unsigned int interruptTransmit;
+	unsigned int interruptReceive;
+	unsigned int interruptError;
+
+	char int_name_tx[GFAR_INT_NAME_MAX];
+	char int_name_rx[GFAR_INT_NAME_MAX];
+	char int_name_er[GFAR_INT_NAME_MAX];
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
 * (Ok, that's not so true anymore, but there is a family resemblance)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
@@ -709,62 +1013,36 @@ struct gfar {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	/* Fields controlled by TX lock */
-	spinlock_t txlock;
 
-	/* Pointer to the array of skbuffs */
-	struct sk_buff ** tx_skbuff;
+	/* Indicates how many tx, rx queues are enabled */
+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
+	unsigned int num_grps;
+	unsigned int mode;
 
-	/* next free skb in the array */
-	u16 skb_curtx;
-
-	/* First skb in line to be transmitted */
-	u16 skb_dirtytx;
-
-	/* Configuration info for the coalescing features */
-	unsigned char txcoalescing;
-	unsigned long txic;
-
-	/* Buffer descriptor pointers */
-	struct txbd8 *tx_bd_base;	/* First tx buffer descriptor */
-	struct txbd8 *cur_tx;	        /* Next free ring entry */
-	struct txbd8 *dirty_tx;		/* First buffer in line
-					   to be transmitted */
-	unsigned int tx_ring_size;
-	unsigned int num_txbdfree;	/* number of TxBDs free */
-
-	/* RX Locked fields */
-	spinlock_t rxlock;
+	/* The total tx and rx ring size for the enabled queues */
+	unsigned int total_tx_ring_size;
+	unsigned int total_rx_ring_size;
 
 	struct device_node *node;
 	struct net_device *ndev;
 	struct of_device *ofdev;
-	struct napi_struct napi;
-
-	/* skb array and index */
-	struct sk_buff ** rx_skbuff;
-	u16 skb_currx;
-
-	/* RX Coalescing values */
-	unsigned char rxcoalescing;
-	unsigned long rxic;
 
-	struct rxbd8 *rx_bd_base;	/* First Rx buffers */
-	struct rxbd8 *cur_rx;           /* Next free rx ring entry */
+	struct gfar_priv_grp gfargrp[MAXGROUPS];
+	struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
+	struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
 
-	/* RX parameters */
-	unsigned int rx_ring_size;
+	/* RX per device parameters */
 	unsigned int rx_buffer_size;
 	unsigned int rx_stash_size;
 	unsigned int rx_stash_index;
 
+	u32 cur_filer_idx;
+
 	struct sk_buff_head rx_recycle;
 
 	struct vlan_group *vlgrp;
 
-	/* Unprotected fields */
-	/* Pointer to the GFAR memory mapped Registers */
-	struct gfar __iomem *regs;
 
 	/* Hash registers and their width */
 	u32 __iomem *hash_regs[16];
@@ -785,13 +1063,10 @@ struct gfar_private {
 	unsigned char rx_csum_enable:1,
 		extended_hash:1,
 		bd_stash_en:1,
+		rx_filer_enable:1,
 		wol_en:1; /* Wake-on-LAN enabled */
 	unsigned short padding;
 
-	unsigned int interruptTransmit;
-	unsigned int interruptReceive;
-	unsigned int interruptError;
-
 	/* PHY stuff */
 	struct phy_device *phydev;
 	struct mii_bus *mii_bus;
@@ -803,14 +1078,13 @@ struct gfar_private {
 
 	struct work_struct reset_task;
 
-	char int_name_tx[GFAR_INT_NAME_MAX];
-	char int_name_rx[GFAR_INT_NAME_MAX];
-	char int_name_er[GFAR_INT_NAME_MAX];
-
 	/* Network Statistics */
 	struct gfar_extra_stats extra_stats;
 };
 
+extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
 static inline u32 gfar_read(volatile unsigned __iomem *addr)
 {
 	u32 val;
@@ -823,12 +1097,28 @@ static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
 	out_be32(addr, val);
 }
 
+static inline void gfar_write_filer(struct gfar_private *priv,
+		unsigned int far, unsigned int fcr, unsigned int fpr)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	gfar_write(&regs->rqfar, far);
+	gfar_write(&regs->rqfcr, fcr);
+	gfar_write(&regs->rqfpr, fpr);
+}
+
+extern void lock_rx_qs(struct gfar_private *priv);
+extern void lock_tx_qs(struct gfar_private *priv);
+extern void unlock_rx_qs(struct gfar_private *priv);
+extern void unlock_tx_qs(struct gfar_private *priv);
 extern irqreturn_t gfar_receive(int irq, void *dev_id);
 extern int startup_gfar(struct net_device *dev);
 extern void stop_gfar(struct net_device *dev);
 extern void gfar_halt(struct net_device *dev);
 extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
 		int enable, u32 regnum, u32 read);
+extern void gfar_configure_coalescing(struct gfar_private *priv,
+		unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
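lock_tx_qs()/unlock_tx_qs() and their rx counterparts, declared just above, replace the old device-wide txlock/rxlock; their definitions live in gianfar.c outside this diff. Assuming they simply walk the per-queue spinlocks introduced in struct gfar_priv_tx_q and struct gfar_priv_rx_q, their likely shape is:

/* Sketch under the assumption above; callers disable local interrupts
 * themselves with local_irq_save(), as seen throughout the .c changes.
 */
void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

The rx variants would do the same over rx_queue[i]->rxlock, which is why every former spin_lock_irqsave(&priv->txlock, flags) call site in this patch turns into local_irq_save() followed by lock_tx_qs().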
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..1010367695e4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
  *
  *  Author: Andy Fleming
  *  Maintainer: Kumar Gala
+ *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- *  Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
+ *  Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
  *
  *  This software may be used and distributed according to
  *  the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
 #include "gianfar.h"
 
 extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
@@ -136,10 +137,11 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 {
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u64 *extra = (u64 *) & priv->extra_stats;
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-		u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon;
+		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
 		struct gfar_stats *stats = (struct gfar_stats *) buf;
 
 		for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +199,18 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 
 	if (NULL == phydev)
 		return -ENODEV;
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
-	cmd->maxtxpkt = get_icft_value(priv->txic);
-	cmd->maxrxpkt = get_icft_value(priv->rxic);
+	/* etsec-1.7 and older versions have only one txic
+	 * and one rxic register, although they support multiple queues */
+	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
 	return phy_ethtool_gset(phydev, cmd);
 }
@@ -218,7 +226,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
 {
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 __iomem *theregs = (u32 __iomem *) priv->regs;
+	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
 	u32 *buf = (u32 *) regbuf;
 
 	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +287,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
 static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned long rxtime;
 	unsigned long rxcount;
 	unsigned long txtime;
@@ -290,10 +300,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 	if (NULL == priv->phydev)
 		return -ENODEV;
 
-	rxtime  = get_ictt_value(priv->rxic);
-	rxcount = get_icft_value(priv->rxic);
-	txtime  = get_ictt_value(priv->txic);
-	txcount = get_icft_value(priv->txic);
+	rx_queue = priv->rx_queue[0];
+	tx_queue = priv->tx_queue[0];
+
+	rxtime  = get_ictt_value(rx_queue->rxic);
+	rxcount = get_icft_value(rx_queue->rxic);
+	txtime  = get_ictt_value(tx_queue->txic);
+	txcount = get_icft_value(tx_queue->txic);
 	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 	cvals->rx_max_coalesced_frames = rxcount;
 
@@ -339,16 +352,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	int i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
 	/* Set up rx coalescing */
+	/* For now, coalescing is enabled/disabled for all queues
+	 * together in case of eTSEC2; per-queue control will be added
+	 * along with the ethtool interface */
 	if ((cvals->rx_coalesce_usecs == 0) ||
-	    (cvals->rx_max_coalesced_frames == 0))
-		priv->rxcoalescing = 0;
-	else
-		priv->rxcoalescing = 1;
+	    (cvals->rx_max_coalesced_frames == 0)) {
+		for (i = 0; i < priv->num_rx_queues; i++)
+			priv->rx_queue[i]->rxcoalescing = 0;
+	} else {
+		for (i = 0; i < priv->num_rx_queues; i++)
+			priv->rx_queue[i]->rxcoalescing = 1;
+	}
 
 	if (NULL == priv->phydev)
 		return -ENODEV;
@@ -366,15 +386,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 		return -EINVAL;
 	}
 
-	priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
-		gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rxic = mk_ic_value(
+			cvals->rx_max_coalesced_frames,
+			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+	}
 
 	/* Set up tx coalescing */
 	if ((cvals->tx_coalesce_usecs == 0) ||
-	    (cvals->tx_max_coalesced_frames == 0))
-		priv->txcoalescing = 0;
-	else
-		priv->txcoalescing = 1;
+	    (cvals->tx_max_coalesced_frames == 0)) {
+		for (i = 0; i < priv->num_tx_queues; i++)
+			priv->tx_queue[i]->txcoalescing = 0;
+	} else {
+		for (i = 0; i < priv->num_tx_queues; i++)
+			priv->tx_queue[i]->txcoalescing = 1;
+	}
 
 	/* Check the bounds of the values */
 	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +415,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 		return -EINVAL;
 	}
 
-	priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
-		gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
-
-	gfar_write(&priv->regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&priv->regs->rxic, priv->rxic);
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->txic = mk_ic_value(
+			cvals->tx_max_coalesced_frames,
+			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+	}
 
-	gfar_write(&priv->regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&priv->regs->txic, priv->txic);
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
 	return 0;
 }
@@ -409,6 +432,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
 	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +446,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
 	/* Values changeable by the user.  The valid values are
 	 * in the range 1 to the "*_max_pending" counterpart above.
 	 */
-	rvals->rx_pending = priv->rx_ring_size;
-	rvals->rx_mini_pending = priv->rx_ring_size;
-	rvals->rx_jumbo_pending = priv->rx_ring_size;
-	rvals->tx_pending = priv->tx_ring_size;
+	rvals->rx_pending = rx_queue->rx_ring_size;
+	rvals->rx_mini_pending = rx_queue->rx_ring_size;
+	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+	rvals->tx_pending = tx_queue->tx_ring_size;
 }
 
 /* Change the current ring parameters, stopping the controller if
@@ -431,7 +459,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 		return -EINVAL;
@@ -451,34 +479,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 		return -EINVAL;
 	}
 
+
 	if (dev->flags & IFF_UP) {
 		unsigned long flags;
 
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_rx_qs(priv);
+		unlock_tx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
 	}
 
 	/* Change the size */
-	priv->rx_ring_size = rvals->rx_pending;
-	priv->tx_ring_size = rvals->tx_pending;
-	priv->num_txbdfree = priv->tx_ring_size;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+	}
 
 	/* Rebuild the rings with the new size */
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
@@ -487,23 +522,28 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
+
 	if (dev->flags & IFF_UP) {
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_tx_qs(priv);
+		unlock_rx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
@@ -515,7 +555,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
@@ -605,6 +645,241 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 }
 #endif
 
+static int gfar_ethflow_to_class(int flow_type, u64 *class)
+{
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		*class = CLASS_CODE_TCP_IPV4;
+		break;
+	case UDP_V4_FLOW:
+		*class = CLASS_CODE_UDP_IPV4;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		*class = CLASS_CODE_AH_ESP_IPV4;
+		break;
+	case SCTP_V4_FLOW:
+		*class = CLASS_CODE_SCTP_IPV4;
+		break;
+	case TCP_V6_FLOW:
+		*class = CLASS_CODE_TCP_IPV6;
+		break;
+	case UDP_V6_FLOW:
+		*class = CLASS_CODE_UDP_IPV6;
+		break;
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		*class = CLASS_CODE_AH_ESP_IPV6;
+		break;
+	case SCTP_V6_FLOW:
+		*class = CLASS_CODE_SCTP_IPV6;
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
+{
+	u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+	if (ethflow & RXH_L2DA) {
+		fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+			RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+				RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_VLAN) {
+		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+				RQFCR_AND | RQFCR_HASHTBL_0;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_IP_SRC) {
+		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & (RXH_IP_DST)) {
+		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L3_PROTO) {
+		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L4_B_0_1) {
+		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L4_B_2_3) {
+		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+}
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+{
+	unsigned int last_rule_idx = priv->cur_filer_idx;
+	unsigned int cmp_rqfpr;
+	unsigned int local_rqfpr[MAX_FILER_IDX + 1];
+	unsigned int local_rqfcr[MAX_FILER_IDX + 1];
+	int i = 0x0, k = 0x0;
+	int j = MAX_FILER_IDX, l = 0x0;
+
+	switch (class) {
+	case TCP_V4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
+		break;
+	case UDP_V4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
+		break;
+	case TCP_V6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
+		break;
+	case UDP_V6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
+		break;
+	case IPV4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4;
+		break;
+	case IPV6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6;
+		break;
+	default:
+		printk(KERN_ERR "Right now this class is not supported\n");
+		return 0;
+	}
+
+	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+		local_rqfpr[j] = ftp_rqfpr[i];
+		local_rqfcr[j] = ftp_rqfcr[i];
+		j--;
+		if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+			RQFCR_CLE |RQFCR_AND)) &&
+			(ftp_rqfpr[i] == cmp_rqfpr))
+			break;
+	}
+
+	if (i == MAX_FILER_IDX + 1) {
+		printk(KERN_ERR "No parse rule found, ");
+		printk(KERN_ERR "can't create hash rules\n");
+		return 0;
+	}
+
+	/* If a match was found, it marks the start of a cluster rule;
+	 * if rules were already programmed there, we need to overwrite them
+	 */
+	for (l = i+1; l < MAX_FILER_IDX; l++) {
+		if ((ftp_rqfcr[l] & RQFCR_CLE) &&
+			!(ftp_rqfcr[l] & RQFCR_AND)) {
+			ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+				RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+			ftp_rqfpr[l] = FPR_FILER_MASK;
+			gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+			break;
+		}
+
+		if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+			continue;
+		else {
+			local_rqfpr[j] = ftp_rqfpr[l];
+			local_rqfcr[j] = ftp_rqfcr[l];
+			j--;
+		}
+	}
+
+	priv->cur_filer_idx = l - 1;
+	last_rule_idx = l;
+
+	/* hash rules */
+	ethflow_to_filer_rules(priv, ethflow);
+
+	/* Write back the popped out rules again */
+	for (k = j+1; k < MAX_FILER_IDX; k++) {
+		ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+		ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+		gfar_write_filer(priv, priv->cur_filer_idx,
+				local_rqfcr[k], local_rqfpr[k]);
+		if (!priv->cur_filer_idx)
+			break;
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	return 1;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+	u64 class;
+
+	if (!gfar_ethflow_to_class(cmd->flow_type, &class))
+		return -EINVAL;
+
+	if (class < CLASS_CODE_USER_PROG1 ||
+			class > CLASS_CODE_SCTP_IPV6)
+		return -EINVAL;
+
+	/* write the filer rules here */
+	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+		return -1;
+
+	return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = gfar_set_hash_opts(priv, cmd);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
 const struct ethtool_ops gfar_ethtool_ops = {
 	.get_settings = gfar_gsettings,
 	.set_settings = gfar_ssettings,
@@ -630,4 +905,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
 	.get_wol = gfar_get_wol,
 	.set_wol = gfar_set_wol,
 #endif
+	.set_rxnfc = gfar_set_nfc,
 };
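The hash-rule helpers above always write a rule through gfar_write_filer() and mirror it in the ftp_rqfcr[]/ftp_rqfpr[] shadow arrays, appending downward from MAX_FILER_IDX via priv->cur_filer_idx; the pop and re-insert dance in gfar_ethflow_to_filer_table() relies on those shadows to re-emit the rules it displaces. A standalone model of just that append-and-mirror mechanism (plain C, the hardware write replaced by a printf, bit values taken from the RQFCR_*/RQFPR defines in gianfar.h):

#include <stdio.h>

#define MAX_FILER_IDX		0xFF
#define FPR_FILER_MASK		0xFFFFFFFFu
#define RQFCR_HASH		0x00010000u
#define RQFCR_AND		0x00000080u
#define RQFCR_CMP_NOMATCH	0x00000060u
#define RQFCR_HASHTBL_0		0x00020000u
#define RQFCR_PID_SPT		0x0000000Fu	/* L4 source port */
#define RQFCR_PID_DPT		0x0000000Eu	/* L4 destination port */

static unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];	/* shadow of the hw table */
static unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
static unsigned int cur_filer_idx = MAX_FILER_IDX;

static void write_filer(unsigned int far, unsigned int fcr, unsigned int fpr)
{
	/* stand-in for gfar_write_filer(): RQFAR selects the entry,
	 * RQFCR/RQFPR carry the rule */
	printf("entry %#04x: fcr=%#010x fpr=%#010x\n", far, fcr, fpr);
}

static void add_hash_rule(unsigned int fcr, unsigned int fpr)
{
	ftp_rqfcr[cur_filer_idx] = fcr;
	ftp_rqfpr[cur_filer_idx] = fpr;
	write_filer(cur_filer_idx, fcr, fpr);
	cur_filer_idx--;		/* next rule goes one slot lower */
}

int main(void)
{
	/* hash on L4 source and destination port, as the RXH_L4_B_0_1 and
	 * RXH_L4_B_2_3 branches above do */
	add_hash_rule(RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0, FPR_FILER_MASK);
	add_hash_rule(RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0, FPR_FILER_MASK);
	return 0;
}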
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index dd26da74f27a..b98c6c512299 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -8,8 +8,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 				 const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int new_setting = 0;
 	u32 temp;
 	unsigned long flags;
@@ -56,30 +58,34 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
 		return count;
 
+
 	/* Find out the new setting */
 	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
 		new_setting = 1;
-	else if (!strncmp("off", buf, count - 1)
-		 || !strncmp("0", buf, count - 1))
+	else if (!strncmp("off", buf, count - 1) ||
+		 !strncmp("0", buf, count - 1))
 		new_setting = 0;
 	else
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
 	/* Set the new stashing value */
 	priv->bd_stash_en = new_setting;
 
-	temp = gfar_read(&priv->regs->attr);
+	temp = gfar_read(&regs->attr);
 
 	if (new_setting)
 		temp |= ATTR_BDSTASH;
 	else
 		temp &= ~(ATTR_BDSTASH);
 
-	gfar_write(&priv->regs->attr, temp);
+	gfar_write(&regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -99,6 +105,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 				      const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -106,7 +113,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	local_irq_save(flags);
+	lock_rx_qs(priv);
+
 	if (length > priv->rx_buffer_size)
 		goto out;
 
@@ -115,23 +124,24 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 
 	priv->rx_stash_size = length;
 
-	temp = gfar_read(&priv->regs->attreli);
+	temp = gfar_read(&regs->attreli);
 	temp &= ~ATTRELI_EL_MASK;
 	temp |= ATTRELI_EL(length);
-	gfar_write(&priv->regs->attreli, temp);
+	gfar_write(&regs->attreli, temp);
 
 	/* Turn stashing on/off as appropriate */
-	temp = gfar_read(&priv->regs->attr);
+	temp = gfar_read(&regs->attr);
 
 	if (length)
 		temp |= ATTR_BUFSTASH;
 	else
 		temp &= ~(ATTR_BUFSTASH);
 
-	gfar_write(&priv->regs->attr, temp);
+	gfar_write(&regs->attr, temp);
 
 out:
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -154,6 +164,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 				       const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned short index = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -161,7 +172,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	local_irq_save(flags);
+	lock_rx_qs(priv);
+
 	if (index > priv->rx_stash_size)
 		goto out;
 
@@ -170,13 +183,14 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 
 	priv->rx_stash_index = index;
 
-	temp = gfar_read(&priv->regs->attreli);
+	temp = gfar_read(&regs->attreli);
 	temp &= ~ATTRELI_EI_MASK;
 	temp |= ATTRELI_EI(index);
-	gfar_write(&priv->regs->attreli, flags);
+	gfar_write(&regs->attreli, temp);
 
 out:
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -198,6 +212,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 				       const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -205,16 +220,18 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_threshold = length;
 
-	temp = gfar_read(&priv->regs->fifo_tx_thr);
+	temp = gfar_read(&regs->fifo_tx_thr);
 	temp &= ~FIFO_TX_THR_MASK;
 	temp |= length;
-	gfar_write(&priv->regs->fifo_tx_thr, temp);
+	gfar_write(&regs->fifo_tx_thr, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -235,6 +252,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 				    const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -242,16 +260,18 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_starve = num;
 
-	temp = gfar_read(&priv->regs->fifo_tx_starve);
+	temp = gfar_read(&regs->fifo_tx_starve);
 	temp &= ~FIFO_TX_STARVE_MASK;
 	temp |= num;
-	gfar_write(&priv->regs->fifo_tx_starve, temp);
+	gfar_write(&regs->fifo_tx_starve, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -273,6 +293,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 					const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -280,16 +301,18 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_starve_off = num;
 
-	temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff);
+	temp = gfar_read(&regs->fifo_tx_starve_shutoff);
 	temp &= ~FIFO_TX_STARVE_OFF_MASK;
 	temp |= num;
-	gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
+	gfar_write(&regs->fifo_tx_starve_shutoff, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index f7519a594945..ea85075a89a2 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -407,10 +407,9 @@ that case.
 /* A few values that may be tweaked. */
 /* Size of each temporary Rx buffer, calculated as:
  * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
- * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
- * 2 more because we use skb_reserve.
+ * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
  */
-#define PKT_BUF_SZ		1538
+#define PKT_BUF_SZ		1536
 
 /* For now, this is going to be set to the maximum size of an ethernet
  * packet.  Eventually, we may want to make it a variable that is
@@ -873,7 +872,7 @@ static int hamachi_open(struct net_device *dev)
 	u32 rx_int_var, tx_int_var;
 	u16 fifo_info;
 
-	i = request_irq(dev->irq, &hamachi_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
@@ -1152,12 +1151,13 @@ static void hamachi_tx_timeout(struct net_device *dev)
 	}
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
 		hmp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 
-		skb_reserve(skb, 2); /* 16 byte align the IP header. */
                 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
@@ -1196,7 +1196,7 @@ static void hamachi_init_ring(struct net_device *dev)
 	 * card.  -KDU
 	 */
 	hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
-		(((dev->mtu+26+7) & ~7) + 2 + 16));
+		(((dev->mtu+26+7) & ~7) + 16));
 
 	/* Initialize all Rx descriptors. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
@@ -1566,8 +1566,8 @@ static int hamachi_rx(struct net_device *dev)
 #endif
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak
-				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			if (pkt_len < rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 #ifdef RX_CHECKSUM
 				printk(KERN_ERR "%s: rx_copybreak non-zero "
 				  "not good with RX_CHECKSUM\n", dev->name);
@@ -1722,10 +1722,10 @@ static void hamachi_error(struct net_device *dev, int intr_status)
 		readl(ioaddr + 0x370);
 		readl(ioaddr + 0x3F0);
 	}
-	if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone))
-		&& hamachi_debug)
+	if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) &&
+	    hamachi_debug)
 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
-			   dev->name, intr_status);
+		       dev->name, intr_status);
 	/* Hmmmmm, it's not clear how to recover from PCI faults. */
 	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
 		hmp->stats.tx_fifo_errors++;
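The hamachi changes above replace netdev_alloc_skb() plus a manual skb_reserve(skb, 2) with netdev_alloc_skb_ip_align(), and drop the two spare alignment bytes from PKT_BUF_SZ accordingly. To a first approximation the helper does the following (a rough equivalence only; the real definition lives in the networking core, and NET_IP_ALIGN is 2 on most architectures):

/* Rough equivalence of netdev_alloc_skb_ip_align(dev, len) */
static inline struct sk_buff *rx_alloc_aligned(struct net_device *dev,
					       unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		/* shift the data so the IP header following the 14-byte
		 * Ethernet header lands on an aligned boundary */
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

Since the alignment headroom is now accounted for inside the helper, the buffer size constant no longer needs to carry it.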
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index fb588301a05d..689b9bd377a5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -34,6 +34,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/semaphore.h>
+#include <linux/compat.h>
 #include <asm/atomic.h>
 
 #define SIXPACK_VERSION    "Revision: 0.3.0"
@@ -777,6 +778,23 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
 	return err;
 }
 
+#ifdef CONFIG_COMPAT
+static long sixpack_compat_ioctl(struct tty_struct * tty, struct file * file,
+				unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCGIFNAME:
+	case SIOCGIFENCAP:
+	case SIOCSIFENCAP:
+	case SIOCSIFHWADDR:
+		return sixpack_ioctl(tty, file, cmd,
+				(unsigned long)compat_ptr(arg));
+	}
+
+	return -ENOIOCTLCMD;
+}
+#endif
+
 static struct tty_ldisc_ops sp_ldisc = {
 	.owner		= THIS_MODULE,
 	.magic		= TTY_LDISC_MAGIC,
@@ -784,6 +802,9 @@ static struct tty_ldisc_ops sp_ldisc = {
 	.open		= sixpack_open,
 	.close		= sixpack_close,
 	.ioctl		= sixpack_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= sixpack_compat_ioctl,
+#endif
 	.receive_buf	= sixpack_receive_buf,
 	.write_wakeup	= sixpack_write_wakeup,
 };
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index e344c84c0ef9..a3c0dc9d8b98 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -596,16 +596,16 @@ static int receive(struct net_device *dev, int cnt)
 					if (!(notbitstream & (0x1fc << j)))
 						state = 0;
 
-					/* not flag received */
-					else if (!(bitstream & (0x1fe << j)) != (0x0fc << j)) {
+					/* flag received */
+					else if ((bitstream & (0x1fe << j)) == (0x0fc << j)) {
 						if (state)
 							do_rxpacket(dev);
 						bc->hdlcrx.bufcnt = 0;
 						bc->hdlcrx.bufptr = bc->hdlcrx.buf;
 						state = 1;
 						numbits = 7-j;
-						}
 					}
+				}
 
 				/* stuffed bit */
 				else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) {
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fe893c91a01b..ae5f11c8fc13 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -167,10 +167,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
 
 static inline int dev_is_ethdev(struct net_device *dev)
 {
-	return (
-			dev->type == ARPHRD_ETHER
-			&& strncmp(dev->name, "dummy", 5)
-	);
+	return (dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5));
 }
 
 /* ------------------------------------------------------------------------ */
@@ -186,7 +183,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	struct ethhdr *eth;
 	struct bpqdev *bpq;
 
-	if (dev_net(dev) != &init_net)
+	if (!net_eq(dev_net(dev), &init_net))
 		goto drop;
 
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -552,7 +549,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
 {
 	struct net_device *dev = (struct net_device *)ptr;
 
-	if (dev_net(dev) != &init_net)
+	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
 
 	if (!dev_is_ethdev(dev))
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 950f3bb21f9d..9ee76b42668f 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -331,8 +331,8 @@ static int __init dmascc_init(void)
 			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
 				j = (io[i] -
 				     hw[h].io_region) / hw[h].io_delta;
-				if (j >= 0 && j < hw[h].num_devs
-				    && hw[h].io_region +
+				if (j >= 0 && j < hw[h].num_devs &&
+				    hw[h].io_region +
 				    j * hw[h].io_delta == io[i]) {
 					base[j] = io[i];
 				}
@@ -396,8 +396,8 @@ static int __init dmascc_init(void)
 					t_val =
 					    inb(t1[i]) + (inb(t1[i]) << 8);
 					/* Also check whether counter did wrap */
-					if (t_val == 0
-					    || t_val > TMR_0_HZ / HZ * 10)
+					if (t_val == 0 ||
+					    t_val > TMR_0_HZ / HZ * 10)
 						counting[i] = 0;
 					delay[i] = jiffies - start[i];
 				}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index db4b7f1603f6..7db0a1c3216c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -36,6 +36,7 @@
 #include <linux/skbuff.h>
 #include <linux/if_arp.h>
 #include <linux/jiffies.h>
+#include <linux/compat.h>
 
 #include <net/ax25.h>
 
@@ -898,6 +899,23 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
 	return err;
 }
 
+#ifdef CONFIG_COMPAT
+static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCGIFNAME:
+	case SIOCGIFENCAP:
+	case SIOCSIFENCAP:
+	case SIOCSIFHWADDR:
+		return mkiss_ioctl(tty, file, cmd,
+				   (unsigned long)compat_ptr(arg));
+	}
+
+	return -ENOIOCTLCMD;
+}
+#endif
+
 /*
  * Handle the 'receiver data ready' interrupt.
  * This function is called by the 'tty_io' module in the kernel when
@@ -972,6 +990,9 @@ static struct tty_ldisc_ops ax_ldisc = {
 	.open		= mkiss_open,
 	.close		= mkiss_close,
 	.ioctl		= mkiss_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= mkiss_compat_ioctl,
+#endif
 	.receive_buf	= mkiss_receive_buf,
 	.write_wakeup	= mkiss_write_wakeup
 };
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 0486cbe01adb..efdbcad63c67 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -187,8 +187,8 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
 		return -EBUSY;
 
 	/* Check for the HP+ signature, 50 48 0x 53. */
-	if (inw(ioaddr + HP_ID) != 0x4850
-		|| (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
+	if (inw(ioaddr + HP_ID) != 0x4850 ||
+	    (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
 		retval = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index dd8665138062..90f890e7c5e1 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -993,8 +993,8 @@ static void hp100_mmuinit(struct net_device *dev)
 	if (lp->mode == 1) {	/* only needed for Busmaster */
 		int xmit_stop, recv_stop;
 
-		if ((lp->chip == HP100_CHIPID_RAINIER)
-		    || (lp->chip == HP100_CHIPID_SHASTA)) {
+		if ((lp->chip == HP100_CHIPID_RAINIER) ||
+		    (lp->chip == HP100_CHIPID_SHASTA)) {
 			int pdl_stop;
 
 			/*
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3fae87559791..fb5e019169ee 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1976,27 +1976,27 @@ static int emac_ethtool_set_settings(struct net_device *ndev,
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		switch (cmd->speed) {
 		case SPEED_10:
-			if (cmd->duplex == DUPLEX_HALF
-			    && !(f & SUPPORTED_10baseT_Half))
+			if (cmd->duplex == DUPLEX_HALF &&
+			    !(f & SUPPORTED_10baseT_Half))
 				return -EINVAL;
-			if (cmd->duplex == DUPLEX_FULL
-			    && !(f & SUPPORTED_10baseT_Full))
+			if (cmd->duplex == DUPLEX_FULL &&
+			    !(f & SUPPORTED_10baseT_Full))
 				return -EINVAL;
 			break;
 		case SPEED_100:
-			if (cmd->duplex == DUPLEX_HALF
-			    && !(f & SUPPORTED_100baseT_Half))
+			if (cmd->duplex == DUPLEX_HALF &&
+			    !(f & SUPPORTED_100baseT_Half))
 				return -EINVAL;
-			if (cmd->duplex == DUPLEX_FULL
-			    && !(f & SUPPORTED_100baseT_Full))
+			if (cmd->duplex == DUPLEX_FULL &&
+			    !(f & SUPPORTED_100baseT_Full))
 				return -EINVAL;
 			break;
 		case SPEED_1000:
-			if (cmd->duplex == DUPLEX_HALF
-			    && !(f & SUPPORTED_1000baseT_Half))
+			if (cmd->duplex == DUPLEX_HALF &&
+			    !(f & SUPPORTED_1000baseT_Half))
 				return -EINVAL;
-			if (cmd->duplex == DUPLEX_FULL
-			    && !(f & SUPPORTED_1000baseT_Full))
+			if (cmd->duplex == DUPLEX_FULL &&
+			    !(f & SUPPORTED_1000baseT_Full))
 				return -EINVAL;
 			break;
 		default:
@@ -2149,9 +2149,12 @@ static int emac_ethtool_nway_reset(struct net_device *ndev)
 	return res;
 }
 
-static int emac_ethtool_get_stats_count(struct net_device *ndev)
+static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
 {
-	return EMAC_ETHTOOL_STATS_COUNT;
+	if (stringset == ETH_SS_STATS)
+		return EMAC_ETHTOOL_STATS_COUNT;
+	else
+		return -EINVAL;
 }
 
 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
@@ -2182,7 +2185,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
 	info->fw_version[0] = '\0';
 	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
 		dev->cell_index, dev->ofdev->node->full_name);
-	info->n_stats = emac_ethtool_get_stats_count(ndev);
 	info->regdump_len = emac_ethtool_get_regs_len(ndev);
 }
 
@@ -2202,7 +2204,7 @@ static const struct ethtool_ops emac_ethtool_ops = {
 	.get_rx_csum = emac_ethtool_get_rx_csum,
 
 	.get_strings = emac_ethtool_get_strings,
-	.get_stats_count = emac_ethtool_get_stats_count,
+	.get_sset_count = emac_ethtool_get_sset_count,
 	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
 
 	.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5862282ab2fe..a86693906ac8 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -625,7 +625,7 @@ static int ibmveth_open(struct net_device *netdev)
 	}
 
 	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
-	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
+	if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
 		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
 		do {
 			rc = h_free_logical_lan(adapter->vdev->unit_address);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 030913f8bd26..f4081c0a2d9c 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,14 +98,16 @@ static void ri_tasklet(unsigned long dev)
 		stats->tx_packets++;
 		stats->tx_bytes +=skb->len;
 
-		skb->dev = dev_get_by_index(&init_net, skb->iif);
+		rcu_read_lock();
+		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
 		if (!skb->dev) {
+			rcu_read_unlock();
 			dev_kfree_skb(skb);
 			stats->tx_dropped++;
 			break;
 		}
-		dev_put(skb->dev);
-		skb->iif = _dev->ifindex;
+		rcu_read_unlock();
+		skb->skb_iif = _dev->ifindex;
 
 		if (from & AT_EGRESS) {
 			dp->st_rx_frm_egr++;
@@ -170,7 +172,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->rx_packets++;
 	stats->rx_bytes+=skb->len;
 
-	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->iif) {
+	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
 		dev_kfree_skb(skb);
 		stats->rx_dropped++;
 		return NETDEV_TX_OK;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f8f5772557ce..e8e9e9194a88 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -46,7 +46,10 @@ static s32  igb_get_cfg_done_82575(struct e1000_hw *);
 static s32  igb_init_hw_82575(struct e1000_hw *);
 static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
 static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+static s32  igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
+static s32  igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
 static s32  igb_reset_hw_82575(struct e1000_hw *);
+static s32  igb_reset_hw_82580(struct e1000_hw *);
 static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
 static s32  igb_setup_copper_link_82575(struct e1000_hw *);
 static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
@@ -62,6 +65,12 @@ static s32  igb_reset_init_script_82575(struct e1000_hw *);
 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
 
+static const u16 e1000_82580_rxpbs_table[] =
+	{ 36, 72, 144, 1, 2, 4, 8, 16,
+	  35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
@@ -81,12 +90,20 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		break;
 	case E1000_DEV_ID_82576:
 	case E1000_DEV_ID_82576_NS:
+	case E1000_DEV_ID_82576_NS_SERDES:
 	case E1000_DEV_ID_82576_FIBER:
 	case E1000_DEV_ID_82576_SERDES:
 	case E1000_DEV_ID_82576_QUAD_COPPER:
 	case E1000_DEV_ID_82576_SERDES_QUAD:
 		mac->type = e1000_82576;
 		break;
+	case E1000_DEV_ID_82580_COPPER:
+	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_SERDES:
+	case E1000_DEV_ID_82580_SGMII:
+	case E1000_DEV_ID_82580_COPPER_DUAL:
+		mac->type = e1000_82580;
+		break;
 	default:
 		return -E1000_ERR_MAC_INIT;
 		break;
@@ -109,6 +126,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		dev_spec->sgmii_active = true;
 		ctrl_ext |= E1000_CTRL_I2C_ENA;
 		break;
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
 		hw->phy.media_type = e1000_media_type_internal_serdes;
 		ctrl_ext |= E1000_CTRL_I2C_ENA;
@@ -120,12 +138,26 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 
 	wr32(E1000_CTRL_EXT, ctrl_ext);
 
+	/*
+	 * if using i2c make certain the MDICNFG register is cleared to prevent
+	 * communications from being misrouted to the mdic registers
+	 */
+	if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
+		wr32(E1000_MDICNFG, 0);
+
 	/* Set mta register count */
 	mac->mta_reg_count = 128;
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
 	if (mac->type == e1000_82576)
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+	if (mac->type == e1000_82580)
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+	/* reset */
+	if (mac->type == e1000_82580)
+		mac->ops.reset_hw = igb_reset_hw_82580;
+	else
+		mac->ops.reset_hw = igb_reset_hw_82575;
 	/* Set if part includes ASF firmware */
 	mac->asf_firmware_present = true;
 	/* Set if manageability features are enabled. */
@@ -193,6 +225,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		phy->ops.reset              = igb_phy_hw_reset_sgmii_82575;
 		phy->ops.read_reg           = igb_read_phy_reg_sgmii_82575;
 		phy->ops.write_reg          = igb_write_phy_reg_sgmii_82575;
+	} else if (hw->mac.type == e1000_82580) {
+		phy->ops.reset              = igb_phy_hw_reset;
+		phy->ops.read_reg           = igb_read_phy_reg_82580;
+		phy->ops.write_reg          = igb_write_phy_reg_82580;
 	} else {
 		phy->ops.reset              = igb_phy_hw_reset;
 		phy->ops.read_reg           = igb_read_phy_reg_igp;
@@ -224,6 +260,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82575;
 		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state;
 		break;
+	case I82580_I_PHY_ID:
+		phy->type                   = e1000_phy_82580;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
+		phy->ops.get_cable_length   = igb_get_cable_length_82580;
+		phy->ops.get_phy_info       = igb_get_phy_info_82580;
+		break;
 	default:
 		return -E1000_ERR_PHY;
 	}
@@ -240,9 +282,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
  **/
 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
 {
-	u16 mask;
+	u16 mask = E1000_SWFW_PHY0_SM;
 
-	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;
 
 	return igb_acquire_swfw_sync_82575(hw, mask);
 }
@@ -256,9 +299,11 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
  **/
 static void igb_release_phy_82575(struct e1000_hw *hw)
 {
-	u16 mask;
+	u16 mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;
 
-	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
 	igb_release_swfw_sync_82575(hw, mask);
 }
 
@@ -274,45 +319,23 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 					  u16 *data)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i, i2ccmd = 0;
+	s32 ret_val = -E1000_ERR_PARAM;
 
 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
 		hw_dbg("PHY Address %u is out of range\n", offset);
-		return -E1000_ERR_PARAM;
+		goto out;
 	}
 
-	/*
-	 * Set up Op-code, Phy Address, and register address in the I2CCMD
-	 * register.  The MAC will take care of interfacing with the
-	 * PHY to retrieve the desired data.
-	 */
-	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-		  (E1000_I2CCMD_OPCODE_READ));
-
-	wr32(E1000_I2CCMD, i2ccmd);
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
 
-	/* Poll the ready bit to see if the I2C read completed */
-	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-		udelay(50);
-		i2ccmd = rd32(E1000_I2CCMD);
-		if (i2ccmd & E1000_I2CCMD_READY)
-			break;
-	}
-	if (!(i2ccmd & E1000_I2CCMD_READY)) {
-		hw_dbg("I2CCMD Read did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (i2ccmd & E1000_I2CCMD_ERROR) {
-		hw_dbg("I2CCMD Error bit set\n");
-		return -E1000_ERR_PHY;
-	}
+	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
 
-	/* Need to byte-swap the 16-bit value. */
-	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+	hw->phy.ops.release(hw);
 
-	return 0;
+out:
+	return ret_val;
 }
 
 /**
@@ -327,47 +350,24 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 					   u16 data)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i, i2ccmd = 0;
-	u16 phy_data_swapped;
+	s32 ret_val = -E1000_ERR_PARAM;
+
 
 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
 		hw_dbg("PHY Address %d is out of range\n", offset);
-		return -E1000_ERR_PARAM;
+		goto out;
 	}
 
-	/* Swap the data bytes for the I2C interface */
-	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
 
-	/*
-	 * Set up Op-code, Phy Address, and register address in the I2CCMD
-	 * register.  The MAC will take care of interfacing with the
-	 * PHY to retrieve the desired data.
-	 */
-	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-		  E1000_I2CCMD_OPCODE_WRITE |
-		  phy_data_swapped);
-
-	wr32(E1000_I2CCMD, i2ccmd);
-
-	/* Poll the ready bit to see if the I2C read completed */
-	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-		udelay(50);
-		i2ccmd = rd32(E1000_I2CCMD);
-		if (i2ccmd & E1000_I2CCMD_READY)
-			break;
-	}
-	if (!(i2ccmd & E1000_I2CCMD_READY)) {
-		hw_dbg("I2CCMD Write did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (i2ccmd & E1000_I2CCMD_ERROR) {
-		hw_dbg("I2CCMD Error bit set\n");
-		return -E1000_ERR_PHY;
-	}
+	ret_val = igb_write_phy_reg_i2c(hw, offset, data);
 
-	return 0;
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
 }
 
 /**
@@ -676,6 +676,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
 
 	if (hw->bus.func == 1)
 		mask = E1000_NVM_CFG_DONE_PORT_1;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_NVM_CFG_DONE_PORT_2;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_NVM_CFG_DONE_PORT_3;
 
 	while (timeout) {
 		if (rd32(E1000_EEMNGCTL) & mask)
@@ -706,9 +710,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 speed, duplex;
 
-	/* SGMII link check is done through the PCS register. */
-	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (igb_sgmii_active_82575(hw))) {
+	if (hw->phy.media_type != e1000_media_type_copper) {
 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
 		                                             &duplex);
 		/*
@@ -723,6 +725,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 
 	return ret_val;
 }
+
 /**
  *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
  *  @hw: pointer to the HW structure
@@ -788,13 +791,27 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 {
 	u32 reg;
+	u16 eeprom_data = 0;
 
 	if (hw->phy.media_type != e1000_media_type_internal_serdes ||
 	    igb_sgmii_active_82575(hw))
 		return;
 
-	/* if the management interface is not enabled, then power down */
-	if (!igb_enable_mng_pass_thru(hw)) {
+	if (hw->bus.func == E1000_FUNC_0)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+	else if (hw->mac.type == e1000_82580)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+		                 &eeprom_data);
+	else if (hw->bus.func == E1000_FUNC_1)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+	/*
+	 * If APM is not enabled in the EEPROM and management interface is
+	 * not enabled, then power down.
+	 */
+	if (!(eeprom_data & E1000_NVM_APME_82575) &&
+	    !igb_enable_mng_pass_thru(hw)) {
 		/* Disable PCS to turn off link */
 		reg = rd32(E1000_PCS_CFG0);
 		reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -908,6 +925,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
 	for (i = 0; i < mac->mta_reg_count; i++)
 		array_wr32(E1000_MTA, i, 0);
 
+	/* Zero out the Unicast HASH table */
+	hw_dbg("Zeroing the UTA\n");
+	for (i = 0; i < mac->uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, 0);
+
 	/* Setup link and flow control */
 	ret_val = igb_setup_link(hw);
 
@@ -934,7 +956,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 {
 	u32 ctrl;
 	s32  ret_val;
-	bool link;
 
 	ctrl = rd32(E1000_CTRL);
 	ctrl |= E1000_CTRL_SLU;
@@ -946,6 +967,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 		goto out;
 
 	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+		/* allow time for SFP cage to power up phy */
+		msleep(300);
+
 		ret_val = hw->phy.ops.reset(hw);
 		if (ret_val) {
 			hw_dbg("Error resetting the PHY.\n");
@@ -959,6 +983,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	case e1000_phy_igp_3:
 		ret_val = igb_copper_link_setup_igp(hw);
 		break;
+	case e1000_phy_82580:
+		ret_val = igb_copper_link_setup_82580(hw);
+		break;
 	default:
 		ret_val = -E1000_ERR_PHY;
 		break;
@@ -967,57 +994,24 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	if (hw->mac.autoneg) {
-		/*
-		 * Setup autoneg and flow control advertisement
-		 * and perform autonegotiation.
-		 */
-		ret_val = igb_copper_link_autoneg(hw);
-		if (ret_val)
-			goto out;
-	} else {
-		/*
-		 * PHY will be set to 10H, 10F, 100H or 100F
-		 * depending on user settings.
-		 */
-		hw_dbg("Forcing Speed and Duplex\n");
-		ret_val = hw->phy.ops.force_speed_duplex(hw);
-		if (ret_val) {
-			hw_dbg("Error Forcing Speed and Duplex\n");
-			goto out;
-		}
-	}
-
-	/*
-	 * Check link status. Wait up to 100 microseconds for link to become
-	 * valid.
-	 */
-	ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
-	if (ret_val)
-		goto out;
-
-	if (link) {
-		hw_dbg("Valid link established!!!\n");
-		/* Config the MAC and PHY after link is up */
-		igb_config_collision_dist(hw);
-		ret_val = igb_config_fc_after_link_up(hw);
-	} else {
-		hw_dbg("Unable to establish link!!!\n");
-	}
-
+	ret_val = igb_setup_copper_link(hw);
 out:
 	return ret_val;
 }
 
 /**
- *  igb_setup_serdes_link_82575 - Setup link for fiber/serdes
+ *  igb_setup_serdes_link_82575 - Setup link for serdes
  *  @hw: pointer to the HW structure
  *
- *  Configures speed and duplex for fiber and serdes links.
+ *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ *  used on copper connections where the serialized gigabit media independent
+ *  interface (sgmii), or serdes fiber is being used.  Configures the link
+ *  for auto-negotiation or forces speed/duplex.
  **/
 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 {
-	u32 ctrl_reg, reg;
+	u32 ctrl_ext, ctrl_reg, reg;
+	bool pcs_autoneg;
 
 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
 	    !igb_sgmii_active_82575(hw))
@@ -1032,9 +1026,9 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
 
 	/* power on the sfp cage if present */
-	reg = rd32(E1000_CTRL_EXT);
-	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
-	wr32(E1000_CTRL_EXT, reg);
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, ctrl_ext);
 
 	ctrl_reg = rd32(E1000_CTRL);
 	ctrl_reg |= E1000_CTRL_SLU;
@@ -1051,15 +1045,31 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 
 	reg = rd32(E1000_PCS_LCTL);
 
-	if (igb_sgmii_active_82575(hw)) {
-		/* allow time for SFP cage to power up phy */
-		msleep(300);
+	/* default pcs_autoneg to the same setting as mac autoneg */
+	pcs_autoneg = hw->mac.autoneg;
 
-		/* AN time out should be disabled for SGMII mode */
+	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+	case E1000_CTRL_EXT_LINK_MODE_SGMII:
+		/* sgmii mode lets the phy handle forcing speed/duplex */
+		pcs_autoneg = true;
+		/* autoneg time out should be disabled for SGMII mode */
 		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
-	} else {
+		break;
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+		/* disable PCS autoneg and support parallel detect only */
+		pcs_autoneg = false;
+	default:
+		/*
+		 * non-SGMII modes only support a speed of 1000/Full for the
+		 * link so it is best to just force the MAC and let the pcs
+		 * link either autoneg or be forced to 1000/Full
+		 */
 		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
 		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+		/* set speed of 1000/Full if speed/duplex is forced */
+		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+		break;
 	}
 
 	wr32(E1000_CTRL, ctrl_reg);
@@ -1070,7 +1080,6 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 	 * mode that will be compatible with older link partners and switches.
 	 * However, both are supported by the hardware and some drivers/tools.
 	 */
-
 	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
 		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
 
@@ -1080,25 +1089,18 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 	 */
 	reg |= E1000_PCS_LCTL_FORCE_FCTRL;
 
-	/*
-	 * we always set sgmii to autoneg since it is the phy that will be
-	 * forcing the link and the serdes is just a go-between
-	 */
-	if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) {
+	if (pcs_autoneg) {
 		/* Set PCS register for autoneg */
-		reg |= E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
-		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
-		       E1000_PCS_LCTL_AN_ENABLE |     /* Enable Autoneg */
-		       E1000_PCS_LCTL_AN_RESTART;     /* Restart autoneg */
-		hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
+		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
 	} else {
-		/* Set PCS register for forced speed */
-		reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
-		       E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
-		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
-		       E1000_PCS_LCTL_FSD |           /* Force Speed */
-		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
-		hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
+		/* Set PCS register for forced link */
+		reg |= E1000_PCS_LCTL_FSD |        /* Force Speed */
+		       E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
+		       E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
+
+		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
 	}
 
 	wr32(E1000_PCS_LCTL, reg);
@@ -1167,9 +1169,18 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 
-	if (igb_check_alt_mac_addr(hw))
-		ret_val = igb_read_mac_addr(hw);
+	/*
+	 * If there's an alternate MAC address place it in RAR0
+	 * so that it will override the Si installed default perm
+	 * address.
+	 */
+	ret_val = igb_check_alt_mac_addr(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_mac_addr(hw);
 
+out:
 	return ret_val;
 }
 
@@ -1181,61 +1192,59 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
  **/
 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
 {
-	u32 temp;
-
 	igb_clear_hw_cntrs_base(hw);
 
-	temp = rd32(E1000_PRC64);
-	temp = rd32(E1000_PRC127);
-	temp = rd32(E1000_PRC255);
-	temp = rd32(E1000_PRC511);
-	temp = rd32(E1000_PRC1023);
-	temp = rd32(E1000_PRC1522);
-	temp = rd32(E1000_PTC64);
-	temp = rd32(E1000_PTC127);
-	temp = rd32(E1000_PTC255);
-	temp = rd32(E1000_PTC511);
-	temp = rd32(E1000_PTC1023);
-	temp = rd32(E1000_PTC1522);
-
-	temp = rd32(E1000_ALGNERRC);
-	temp = rd32(E1000_RXERRC);
-	temp = rd32(E1000_TNCRS);
-	temp = rd32(E1000_CEXTERR);
-	temp = rd32(E1000_TSCTC);
-	temp = rd32(E1000_TSCTFC);
-
-	temp = rd32(E1000_MGTPRC);
-	temp = rd32(E1000_MGTPDC);
-	temp = rd32(E1000_MGTPTC);
-
-	temp = rd32(E1000_IAC);
-	temp = rd32(E1000_ICRXOC);
-
-	temp = rd32(E1000_ICRXPTC);
-	temp = rd32(E1000_ICRXATC);
-	temp = rd32(E1000_ICTXPTC);
-	temp = rd32(E1000_ICTXATC);
-	temp = rd32(E1000_ICTXQEC);
-	temp = rd32(E1000_ICTXQMTC);
-	temp = rd32(E1000_ICRXDMTC);
-
-	temp = rd32(E1000_CBTMPC);
-	temp = rd32(E1000_HTDPMC);
-	temp = rd32(E1000_CBRMPC);
-	temp = rd32(E1000_RPTHC);
-	temp = rd32(E1000_HGPTC);
-	temp = rd32(E1000_HTCBDPC);
-	temp = rd32(E1000_HGORCL);
-	temp = rd32(E1000_HGORCH);
-	temp = rd32(E1000_HGOTCL);
-	temp = rd32(E1000_HGOTCH);
-	temp = rd32(E1000_LENERRS);
+	rd32(E1000_PRC64);
+	rd32(E1000_PRC127);
+	rd32(E1000_PRC255);
+	rd32(E1000_PRC511);
+	rd32(E1000_PRC1023);
+	rd32(E1000_PRC1522);
+	rd32(E1000_PTC64);
+	rd32(E1000_PTC127);
+	rd32(E1000_PTC255);
+	rd32(E1000_PTC511);
+	rd32(E1000_PTC1023);
+	rd32(E1000_PTC1522);
+
+	rd32(E1000_ALGNERRC);
+	rd32(E1000_RXERRC);
+	rd32(E1000_TNCRS);
+	rd32(E1000_CEXTERR);
+	rd32(E1000_TSCTC);
+	rd32(E1000_TSCTFC);
+
+	rd32(E1000_MGTPRC);
+	rd32(E1000_MGTPDC);
+	rd32(E1000_MGTPTC);
+
+	rd32(E1000_IAC);
+	rd32(E1000_ICRXOC);
+
+	rd32(E1000_ICRXPTC);
+	rd32(E1000_ICRXATC);
+	rd32(E1000_ICTXPTC);
+	rd32(E1000_ICTXATC);
+	rd32(E1000_ICTXQEC);
+	rd32(E1000_ICTXQMTC);
+	rd32(E1000_ICRXDMTC);
+
+	rd32(E1000_CBTMPC);
+	rd32(E1000_HTDPMC);
+	rd32(E1000_CBRMPC);
+	rd32(E1000_RPTHC);
+	rd32(E1000_HGPTC);
+	rd32(E1000_HTCBDPC);
+	rd32(E1000_HGORCL);
+	rd32(E1000_HGORCH);
+	rd32(E1000_HGOTCL);
+	rd32(E1000_HGOTCH);
+	rd32(E1000_LENERRS);
 
 	/* This register should not be read in copper configurations */
 	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
 	    igb_sgmii_active_82575(hw))
-		temp = rd32(E1000_SCVPC);
+		rd32(E1000_SCVPC);
 }
 
 /**
@@ -1400,8 +1409,183 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
 	wr32(E1000_VT_CTL, vt_ctl);
 }
 
+/**
+ *  igb_read_phy_reg_82580 - Read 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	u32 mdicnfg = 0;
+	s32 ret_val;
+
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We config the phy address in MDICNFG register now. Same bits
+	 * as before. The values in MDIC can be written but will be
+	 * ignored. This allows us to call the old function after
+	 * configuring the PHY address in the new register
+	 */
+	mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
+	wr32(E1000_MDICNFG, mdicnfg);
+
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_82580 - Write 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	u32 mdicnfg = 0;
+	s32 ret_val;
+
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We config the phy address in MDICNFG register now. Same bits
+	 * as before. The values in MDIC can be written but will be
+	 * ignored. This allows us to call the old function after
+	 * configuring the PHY address in the new register
+	 */
+	mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
+	wr32(E1000_MDICNFG, mdicnfg);
+
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_reset_hw_82580 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the function or the entire device (all ports, etc.)
+ *  to a known state.
+ **/
+static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	/* BH SW mailbox bit in SW_FW_SYNC */
+	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+	u32 ctrl, icr;
+	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+
+	hw->dev_spec._82575.global_device_reset = false;
+
+	/* Get current control state. */
+	ctrl = rd32(E1000_CTRL);
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = igb_disable_pcie_master(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Master disable polling has failed.\n");
+
+	hw_dbg("Masking off all interrupts\n");
+	wr32(E1000_IMC, 0xffffffff);
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
+
+	msleep(10);
+
+	/* Determine whether or not a global dev reset is requested */
+	if (global_device_reset &&
+		igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
+			global_device_reset = false;
+
+	if (global_device_reset &&
+		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+		ctrl |= E1000_CTRL_DEV_RST;
+	else
+		ctrl |= E1000_CTRL_RST;
+
+	wr32(E1000_CTRL, ctrl);
+
+	/* Add delay to ensure DEV_RST has time to complete */
+	if (global_device_reset)
+		msleep(5);
+
+	ret_val = igb_get_auto_rd_done(hw);
+	if (ret_val) {
+		/*
+		 * When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		hw_dbg("Auto Read Done did not complete\n");
+	}
+
+	/* If EEPROM is not present, run manual init scripts */
+	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+		igb_reset_init_script_82575(hw);
+
+	/* clear global device reset status bit */
+	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+	/* Clear any pending interrupt events. */
+	wr32(E1000_IMC, 0xffffffff);
+	icr = rd32(E1000_ICR);
+
+	/* Install any alternate MAC address into RAR0 */
+	ret_val = igb_check_alt_mac_addr(hw);
+
+	/* Release semaphore */
+	if (global_device_reset)
+		igb_release_swfw_sync_82575(hw, swmbsw_mask);
+
+	return ret_val;
+}
+
+/**
+ *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ *  @data: data received by reading RXPBS register
+ *
+ *  The 82580 uses a table based approach for packet buffer allocation sizes.
+ *  This function converts the retrieved value into the correct table value
+ *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ *  0x0 36  72 144   1   2   4   8  16
+ *  0x8 35  70 140 rsv rsv rsv rsv rsv
+ */
+u16 igb_rxpbs_adjust_82580(u32 data)
+{
+	u16 ret_val = 0;
+
+	if (data < E1000_82580_RXPBS_TABLE_SIZE)
+		ret_val = e1000_82580_rxpbs_table[data];
+
+	return ret_val;
+}
+
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
-	.reset_hw             = igb_reset_hw_82575,
 	.init_hw              = igb_init_hw_82575,
 	.check_for_link       = igb_check_for_link_82575,
 	.rar_set              = igb_rar_set,
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index ebd146fd4e15..d51c9927c819 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,6 +38,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
 
 #define E1000_RAR_ENTRIES_82575   16
 #define E1000_RAR_ENTRIES_82576   24
+#define E1000_RAR_ENTRIES_82580   24
+
+#define E1000_SW_SYNCH_MB              0x00000100
+#define E1000_STAT_DEV_RST_SET         0x00100000
+#define E1000_CTRL_DEV_RST             0x20000000
 
 /* SRRCTL bit definitions */
 #define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
@@ -66,6 +71,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
     E1000_EICR_RX_QUEUE3)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
 
 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
@@ -98,6 +105,7 @@ union e1000_adv_rx_desc {
 
 #define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
 #define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
@@ -167,6 +175,18 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
 
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_1588            (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575          0x0400
 #define MAX_NUM_VFS                   8
 
 #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
@@ -202,9 +222,21 @@ struct e1000_adv_tx_context_desc {
 #define E1000_IOVCTL 0x05BBC
 #define E1000_IOVCTL_REUSE_VFQ 0x00000001
 
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
 #define ALL_QUEUES   0xFFFF
 
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
 void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
 void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+u16 igb_rxpbs_adjust_82580(u32 data);
 
 #endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb916833f303..6e036ae3138f 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -49,6 +49,7 @@
 #define E1000_CTRL_EXT_PFRSTD    0x00004000
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
 #define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
 #define E1000_CTRL_EXT_EIAME          0x01000000
 #define E1000_CTRL_EXT_IRCA           0x00000001
@@ -329,6 +330,7 @@
 #define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
 #define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED  0x80000000
 /* LAN connected device generates an interrupt */
@@ -370,6 +372,7 @@
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
 #define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
 
 /* Extended Interrupt Mask Set */
@@ -378,6 +381,7 @@
 /* Interrupt Cause Set */
 #define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
 #define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
 
 /* Extended Interrupt Cause Set */
 
@@ -435,6 +439,39 @@
 /* Flow Control */
 #define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
 
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
@@ -524,8 +561,12 @@
 #define NVM_ALT_MAC_ADDR_PTR       0x0037
 #define NVM_CHECKSUM_REG           0x003F
 
-#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
-#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
 
 /* Mask bits for fields in Word 0x0f of the NVM */
 #define NVM_WORD0F_PAUSE_MASK       0x3000
@@ -592,6 +633,7 @@
  */
 #define M88E1111_I_PHY_ID    0x01410CC0
 #define IGP03E1000_E_PHY_ID  0x02A80390
+#define I82580_I_PHY_ID      0x015403A0
 #define M88_VENDOR           0x0141
 
 /* M88E1000 Specific Registers */
@@ -678,4 +720,8 @@
 #define E1000_VFTA_ENTRY_MASK                0x7F
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
 
+/* DMA Coalescing register fields */
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
+                                                      on DMA coal */
+
 #endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 119869b1124d..dbaeb5f5e0c7 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -42,20 +42,35 @@ struct e1000_hw;
 #define E1000_DEV_ID_82576_SERDES             0x10E7
 #define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
 #define E1000_DEV_ID_82576_NS                 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES          0x1518
 #define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
 #define E1000_DEV_ID_82575EB_COPPER           0x10A7
 #define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
 #define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
+#define E1000_DEV_ID_82580_COPPER             0x150E
+#define E1000_DEV_ID_82580_FIBER              0x150F
+#define E1000_DEV_ID_82580_SERDES             0x1510
+#define E1000_DEV_ID_82580_SGMII              0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
 
 #define E1000_REVISION_2 2
 #define E1000_REVISION_4 4
 
+#define E1000_FUNC_0     0
 #define E1000_FUNC_1     1
+#define E1000_FUNC_2     2
+#define E1000_FUNC_3     3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9
 
 enum e1000_mac_type {
 	e1000_undefined = 0,
 	e1000_82575,
 	e1000_82576,
+	e1000_82580,
 	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
 };
 
@@ -70,7 +85,6 @@ enum e1000_nvm_type {
 	e1000_nvm_unknown = 0,
 	e1000_nvm_none,
 	e1000_nvm_eeprom_spi,
-	e1000_nvm_eeprom_microwire,
 	e1000_nvm_flash_hw,
 	e1000_nvm_flash_sw
 };
@@ -79,8 +93,6 @@ enum e1000_nvm_override {
 	e1000_nvm_override_none = 0,
 	e1000_nvm_override_spi_small,
 	e1000_nvm_override_spi_large,
-	e1000_nvm_override_microwire_small,
-	e1000_nvm_override_microwire_large
 };
 
 enum e1000_phy_type {
@@ -92,6 +104,7 @@ enum e1000_phy_type {
 	e1000_phy_gg82563,
 	e1000_phy_igp_3,
 	e1000_phy_ife,
+	e1000_phy_82580,
 };
 
 enum e1000_bus_type {
@@ -288,6 +301,7 @@ struct e1000_mac_operations {
 
 struct e1000_phy_operations {
 	s32  (*acquire)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
 	s32  (*check_reset_block)(struct e1000_hw *);
 	s32  (*force_speed_duplex)(struct e1000_hw *);
 	s32  (*get_cfg_done)(struct e1000_hw *hw);
@@ -339,6 +353,7 @@ struct e1000_mac_info {
 	u16 ifs_ratio;
 	u16 ifs_step_size;
 	u16 mta_reg_count;
+	u16 uta_reg_count;
 
 	/* Maximum size of the MTA register table in all supported adapters */
 	#define MAX_MTA_REG 128
@@ -463,6 +478,7 @@ struct e1000_mbx_info {
 
 struct e1000_dev_spec_82575 {
 	bool sgmii_active;
+	bool global_device_reset;
 };
 
 struct e1000_hw {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 7d76bb085e10..2ad358a240bf 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
 	}
 
 	if (nvm_alt_mac_addr_offset == 0xFFFF) {
-		ret_val = -(E1000_NOT_IMPLEMENTED);
+		/* There is no Alternate MAC Address */
 		goto out;
 	}
 
 	if (hw->bus.func == E1000_FUNC_1)
-		nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
-
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
 	for (i = 0; i < ETH_ALEN; i += 2) {
 		offset = nvm_alt_mac_addr_offset + (i >> 1);
 		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
 
 	/* if multicast bit is set, the alternate address will not be used */
 	if (alt_mac_addr[0] & 0x01) {
-		ret_val = -(E1000_NOT_IMPLEMENTED);
+		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
 		goto out;
 	}
 
-	for (i = 0; i < ETH_ALEN; i++)
-		hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
-
-	hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
+	/*
+	 * We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
 
 out:
 	return ret_val;
@@ -246,8 +247,15 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 	if (rar_low || rar_high)
 		rar_high |= E1000_RAH_AV;
 
+	/*
+	 * Some bridges will combine consecutive 32-bit writes into
+	 * a single burst write, which will malfunction on some parts.
+	 * The flushes avoid this.
+	 */
 	wr32(E1000_RAL(index), rar_low);
+	wrfl();
 	wr32(E1000_RAH(index), rar_high);
+	wrfl();
 }
 
 /**
@@ -399,45 +407,43 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
  **/
 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
 {
-	u32 temp;
-
-	temp = rd32(E1000_CRCERRS);
-	temp = rd32(E1000_SYMERRS);
-	temp = rd32(E1000_MPC);
-	temp = rd32(E1000_SCC);
-	temp = rd32(E1000_ECOL);
-	temp = rd32(E1000_MCC);
-	temp = rd32(E1000_LATECOL);
-	temp = rd32(E1000_COLC);
-	temp = rd32(E1000_DC);
-	temp = rd32(E1000_SEC);
-	temp = rd32(E1000_RLEC);
-	temp = rd32(E1000_XONRXC);
-	temp = rd32(E1000_XONTXC);
-	temp = rd32(E1000_XOFFRXC);
-	temp = rd32(E1000_XOFFTXC);
-	temp = rd32(E1000_FCRUC);
-	temp = rd32(E1000_GPRC);
-	temp = rd32(E1000_BPRC);
-	temp = rd32(E1000_MPRC);
-	temp = rd32(E1000_GPTC);
-	temp = rd32(E1000_GORCL);
-	temp = rd32(E1000_GORCH);
-	temp = rd32(E1000_GOTCL);
-	temp = rd32(E1000_GOTCH);
-	temp = rd32(E1000_RNBC);
-	temp = rd32(E1000_RUC);
-	temp = rd32(E1000_RFC);
-	temp = rd32(E1000_ROC);
-	temp = rd32(E1000_RJC);
-	temp = rd32(E1000_TORL);
-	temp = rd32(E1000_TORH);
-	temp = rd32(E1000_TOTL);
-	temp = rd32(E1000_TOTH);
-	temp = rd32(E1000_TPR);
-	temp = rd32(E1000_TPT);
-	temp = rd32(E1000_MPTC);
-	temp = rd32(E1000_BPTC);
+	rd32(E1000_CRCERRS);
+	rd32(E1000_SYMERRS);
+	rd32(E1000_MPC);
+	rd32(E1000_SCC);
+	rd32(E1000_ECOL);
+	rd32(E1000_MCC);
+	rd32(E1000_LATECOL);
+	rd32(E1000_COLC);
+	rd32(E1000_DC);
+	rd32(E1000_SEC);
+	rd32(E1000_RLEC);
+	rd32(E1000_XONRXC);
+	rd32(E1000_XONTXC);
+	rd32(E1000_XOFFRXC);
+	rd32(E1000_XOFFTXC);
+	rd32(E1000_FCRUC);
+	rd32(E1000_GPRC);
+	rd32(E1000_BPRC);
+	rd32(E1000_MPRC);
+	rd32(E1000_GPTC);
+	rd32(E1000_GORCL);
+	rd32(E1000_GORCH);
+	rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH);
+	rd32(E1000_RNBC);
+	rd32(E1000_RUC);
+	rd32(E1000_RFC);
+	rd32(E1000_ROC);
+	rd32(E1000_RJC);
+	rd32(E1000_TORL);
+	rd32(E1000_TORH);
+	rd32(E1000_TOTL);
+	rd32(E1000_TOTH);
+	rd32(E1000_TPR);
+	rd32(E1000_TPT);
+	rd32(E1000_MPTC);
+	rd32(E1000_BPTC);
 }
 
 /**
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index ed9058eca45c..c474cdb70047 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -143,12 +143,16 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
 	if (!countdown || !mbx->ops.check_for_msg)
 		goto out;
 
-	while (mbx->ops.check_for_msg(hw, mbx_id)) {
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
 		countdown--;
 		if (!countdown)
 			break;
 		udelay(mbx->usec_delay);
 	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
 out:
 	return countdown ? 0 : -E1000_ERR_MBX;
 }
@@ -168,12 +172,16 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
 	if (!countdown || !mbx->ops.check_for_ack)
 		goto out;
 
-	while (mbx->ops.check_for_ack(hw, mbx_id)) {
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
 		countdown--;
 		if (!countdown)
 			break;
 		udelay(mbx->usec_delay);
 	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
 out:
 	return countdown ? 0 : -E1000_ERR_MBX;
 }
@@ -217,12 +225,13 @@ out:
 static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
 	struct e1000_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = 0;
+	s32 ret_val = -E1000_ERR_MBX;
 
-	if (!mbx->ops.write)
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
 		goto out;
 
-	/* send msg*/
+	/* send msg */
 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
 
 	/* if msg sent wait until we receive an ack */
@@ -305,6 +314,30 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
 }
 
 /**
+ *  igb_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+	u32 p2v_mailbox;
+
+
+	/* Take ownership of the buffer */
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+	/* reserve mailbox for vf use */
+	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+	if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+		ret_val = 0;
+
+	return ret_val;
+}
+
+/**
  *  igb_write_mbx_pf - Places a message in the mailbox
  *  @hw: pointer to the HW structure
  *  @msg: The message buffer
@@ -316,27 +349,17 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
 static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
                               u16 vf_number)
 {
-	u32 p2v_mailbox;
-	s32 ret_val = 0;
+	s32 ret_val;
 	u16 i;
 
-	/* Take ownership of the buffer */
-	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
-
-	/* Make sure we have ownership now... */
-	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
-	if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
-		/* failed to grab ownership */
-		ret_val = -E1000_ERR_MBX;
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
 		goto out_no_write;
-	}
 
-	/*
-	 * flush any ack or msg which may already be in the queue
-	 * as they are likely the result of an error
-	 */
-	igb_check_for_ack_pf(hw, vf_number);
+	/* flush msg and acks as we are overwriting the message buffer */
 	igb_check_for_msg_pf(hw, vf_number);
+	igb_check_for_ack_pf(hw, vf_number);
 
 	/* copy the caller specified message to the mailbox memory buffer */
 	for (i = 0; i < size; i++)
@@ -367,20 +390,13 @@ out_no_write:
 static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
                              u16 vf_number)
 {
-	u32 p2v_mailbox;
-	s32 ret_val = 0;
+	s32 ret_val;
 	u16 i;
 
-	/* Take ownership of the buffer */
-	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
-
-	/* Make sure we have ownership now... */
-	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
-	if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
-		/* failed to grab ownership */
-		ret_val = -E1000_ERR_MBX;
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
 		goto out_no_read;
-	}
 
 	/* copy the message to the mailbox memory buffer */
 	for (i = 0; i < size; i++)
@@ -392,8 +408,6 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
 	/* update stats */
 	hw->mbx.stats.msgs_rx++;
 
-	ret_val = 0;
-
 out_no_read:
 	return ret_val;
 }
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea3f198..bb112fb6c3a1 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
 #define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
 
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a88bfe2f1e8f..d83b77fa4038 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -78,9 +78,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
 	u32 mask;
 
 	mask = 0x01 << (count - 1);
-	if (nvm->type == e1000_nvm_eeprom_microwire)
-		eecd &= ~E1000_EECD_DO;
-	else if (nvm->type == e1000_nvm_eeprom_spi)
+	if (nvm->type == e1000_nvm_eeprom_spi)
 		eecd |= E1000_EECD_DO;
 
 	do {
@@ -220,22 +218,7 @@ static void igb_standby_nvm(struct e1000_hw *hw)
 	struct e1000_nvm_info *nvm = &hw->nvm;
 	u32 eecd = rd32(E1000_EECD);
 
-	if (nvm->type == e1000_nvm_eeprom_microwire) {
-		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-		wr32(E1000_EECD, eecd);
-		wrfl();
-		udelay(nvm->delay_usec);
-
-		igb_raise_eec_clk(hw, &eecd);
-
-		/* Select EEPROM */
-		eecd |= E1000_EECD_CS;
-		wr32(E1000_EECD, eecd);
-		wrfl();
-		udelay(nvm->delay_usec);
-
-		igb_lower_eec_clk(hw, &eecd);
-	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+	if (nvm->type == e1000_nvm_eeprom_spi) {
 		/* Toggle CS to flush commands */
 		eecd |= E1000_EECD_CS;
 		wr32(E1000_EECD, eecd);
@@ -263,12 +246,6 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
 		/* Pull CS high */
 		eecd |= E1000_EECD_CS;
 		igb_lower_eec_clk(hw, &eecd);
-	} else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
-		/* CS on Microcwire is active-high */
-		eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
-		wr32(E1000_EECD, eecd);
-		igb_raise_eec_clk(hw, &eecd);
-		igb_lower_eec_clk(hw, &eecd);
 	}
 }
 
@@ -304,14 +281,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
 	u8 spi_stat_reg;
 
 
-	if (nvm->type == e1000_nvm_eeprom_microwire) {
-		/* Clear SK and DI */
-		eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
-		wr32(E1000_EECD, eecd);
-		/* Set CS */
-		eecd |= E1000_EECD_CS;
-		wr32(E1000_EECD, eecd);
-	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+	if (nvm->type == e1000_nvm_eeprom_spi) {
 		/* Clear SK and CS */
 		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
 		wr32(E1000_EECD, eecd);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ee460600e74b..5c9d73e9bb8d 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -39,6 +39,9 @@ static s32  igb_wait_autoneg(struct e1000_hw *hw);
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] =
 	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_m88_cable_length_table) / \
+                 sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] =
     { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -109,7 +112,10 @@ out:
  **/
 static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
 {
-	s32 ret_val;
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.write_reg))
+		goto out;
 
 	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
 	if (ret_val)
@@ -130,7 +136,7 @@ out:
 *  Reads the MDI control register in the PHY at offset and stores the
  *  information read to data.
  **/
-static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	u32 i, mdic = 0;
@@ -188,7 +194,7 @@ out:
  *
  *  Writes data to MDI control register in the PHY at offset.
  **/
-static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	u32 i, mdic = 0;
@@ -239,6 +245,103 @@ out:
 }
 
 /**
+ *  igb_read_phy_reg_i2c - Read PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the i2c interface and stores the
+ *  retrieved information in data.
+ **/
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+
+
+	/*
+	 * Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+	          (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+	          (E1000_I2CCMD_OPCODE_READ));
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C read completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = rd32(E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/* Need to byte-swap the 16-bit value. */
+	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+	return 0;
+}
+
+/**
+ *  igb_write_phy_reg_i2c - Write PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+	u16 phy_data_swapped;
+
+
+	/* Swap the data bytes for the I2C interface */
+	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+	/*
+	 * Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+	          (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+	          E1000_I2CCMD_OPCODE_WRITE |
+	          phy_data_swapped);
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C write completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = rd32(E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Write did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
  *  igb_read_phy_reg_igp - Read igp PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
@@ -318,6 +421,57 @@ out:
 }
 
 /**
+ *  igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	if (phy->type == e1000_phy_82580) {
+		ret_val = hw->phy.ops.reset(hw);
+		if (ret_val) {
+			hw_dbg("Error resetting the PHY.\n");
+			goto out;
+		}
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
+
+	/* Enable downshift */
+	phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
+
+	ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Set number of link attempts before downshift */
+	ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
+	if (ret_val)
+		goto out;
+	phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
+	ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
  *  igb_copper_link_setup_m88 - Setup m88 PHY's for copper link
  *  @hw: pointer to the HW structure
  *
@@ -572,7 +726,7 @@ out:
  *  and restart the negotiation process between the link partner.  If
  *  autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
  **/
-s32 igb_copper_link_autoneg(struct e1000_hw *hw)
+static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -796,6 +950,65 @@ out:
 }
 
 /**
+ *  igb_setup_copper_link - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established,
+ *  collision distance and flow control are configured.  If link is
+ *  not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 igb_setup_copper_link(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = igb_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		hw_dbg("Forcing Speed and Duplex\n");
+		ret_val = hw->phy.ops.force_speed_duplex(hw);
+		if (ret_val) {
+			hw_dbg("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = igb_phy_has_link(hw,
+	                           COPPER_LINK_UP_LIMIT,
+	                           10,
+	                           &link);
+	if (ret_val)
+		goto out;
+
+	if (link) {
+		hw_dbg("Valid link established!!!\n");
+		igb_config_collision_dist(hw);
+		ret_val = igb_config_fc_after_link_up(hw);
+	} else {
+		hw_dbg("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
  *  igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
  *  @hw: pointer to the HW structure
  *
@@ -903,22 +1116,19 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 
 	igb_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	/* Reset the phy to commit changes. */
-	phy_data |= MII_CR_RESET;
-
 	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
-	udelay(1);
+	/* Reset the phy to commit changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val)
+		goto out;
 
 	if (phy->autoneg_wait_to_complete) {
 		hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
 
-		ret_val = igb_phy_has_link(hw,
-						     PHY_FORCE_LIMIT,
-						     100000,
-						     &link);
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
 		if (ret_val)
 			goto out;
 
@@ -928,8 +1138,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			 * Reset the DSP and cross our fingers.
 			 */
 			ret_val = phy->ops.write_reg(hw,
-						      M88E1000_PHY_PAGE_SELECT,
-						      0x001d);
+						     M88E1000_PHY_PAGE_SELECT,
+						     0x001d);
 			if (ret_val)
 				goto out;
 			ret_val = igb_phy_reset_dsp(hw);
@@ -939,7 +1149,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 
 		/* Try once more */
 		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
-					     100000, &link);
+					   100000, &link);
 		if (ret_val)
 			goto out;
 	}
@@ -1051,9 +1261,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 {
 	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
+	s32 ret_val = 0;
 	u16 data;
 
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
 	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
 	if (ret_val)
 		goto out;
@@ -1288,8 +1501,14 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
 		 * it across the board.
 		 */
 		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
-		if (ret_val)
-			break;
+		if (ret_val) {
+			/*
+			 * If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			udelay(usec_interval);
+		}
 		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
 		if (ret_val)
 			break;
@@ -1333,8 +1552,13 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
 
 	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
 	phy->min_cable_length = e1000_m88_cable_length_table[index];
-	phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
 
 	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
 
@@ -1715,3 +1939,194 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 	return 0;
 }
 
+/**
+ *  igb_check_polarity_82580 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  82580 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
+	phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+
+	ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+
+		ret_val = igb_phy_has_link(hw,
+		                           PHY_FORCE_LIMIT,
+		                           100000,
+		                           &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw,
+		                           PHY_FORCE_LIMIT,
+		                           100000,
+		                           &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length and local and remote receiver status.
+ **/
+s32 igb_get_phy_info_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = igb_check_polarity_82580(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
+
+	if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
+	    I82580_PHY_STATUS2_SPEED_1000MBPS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_cable_length_82580 - Determine cable length for 82580 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 igb_get_cable_length_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
+	         I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		ret_val = -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+out:
+	return ret_val;
+}
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index ebe4b616db8a..555eb54bb6ed 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -43,7 +43,6 @@ enum e1000_smart_speed {
 
 s32  igb_check_downshift(struct e1000_hw *hw);
 s32  igb_check_reset_block(struct e1000_hw *hw);
-s32  igb_copper_link_autoneg(struct e1000_hw *hw);
 s32  igb_copper_link_setup_igp(struct e1000_hw *hw);
 s32  igb_copper_link_setup_m88(struct e1000_hw *hw);
 s32  igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
@@ -57,10 +56,19 @@ s32  igb_phy_sw_reset(struct e1000_hw *hw);
 s32  igb_phy_hw_reset(struct e1000_hw *hw);
 s32  igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  igb_setup_copper_link(struct e1000_hw *hw);
 s32  igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
 				u32 usec_interval, bool *success);
 s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_copper_link_setup_82580(struct e1000_hw *hw);
+s32  igb_get_phy_info_82580(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
+s32  igb_get_cable_length_82580(struct e1000_hw *hw);
 
 /* IGP01E1000 Specific Registers */
 #define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
@@ -75,6 +83,33 @@ s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
 #define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
 #define IGP01E1000_PSCFR_SMART_SPEED      0x0080
 
+#define I82580_ADDR_REG                   16
+#define I82580_CFG_REG                    22
+#define I82580_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82580_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82580_CTRL_REG                   23
+#define I82580_CTRL_DOWNSHIFT_MASK        (7 << 10)
+
+/* 82580 specific PHY registers */
+#define I82580_PHY_CTRL_2            18
+#define I82580_PHY_LBK_CTRL          19
+#define I82580_PHY_STATUS_2          26
+#define I82580_PHY_DIAG_STATUS       31
+
+/* I82580 PHY Status 2 */
+#define I82580_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82580_PHY_STATUS2_MDIX           0x0800
+#define I82580_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
+
+/* I82580 PHY Control 2 */
+#define I82580_PHY_CTRL2_AUTO_MDIX        0x0400
+#define I82580_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+
+/* I82580 PHY Diagnostics Status */
+#define I82580_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
 /* Enable flexible speed on link-up */
 #define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
 #define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 345d1442d6d6..dd4e6ffd29f5 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -34,6 +34,7 @@
 #define E1000_EERD     0x00014  /* EEPROM Read - RW */
 #define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
 #define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_MDICNFG  0x00E04  /* MDI Config - RW */
 #define E1000_SCTL     0x00024  /* SerDes Control - RW */
 #define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
 #define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
@@ -76,59 +77,20 @@
 #define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
 
 /* IEEE 1588 TIMESYNCH */
-#define E1000_TSYNCTXCTL 0x0B614
-#define E1000_TSYNCTXCTL_VALID (1<<0)
-#define E1000_TSYNCTXCTL_ENABLED (1<<4)
-#define E1000_TSYNCRXCTL 0x0B620
-#define E1000_TSYNCRXCTL_VALID (1<<0)
-#define E1000_TSYNCRXCTL_ENABLED (1<<4)
-enum {
-	E1000_TSYNCRXCTL_TYPE_L2_V2 = 0,
-	E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1),
-	E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2),
-	E1000_TSYNCRXCTL_TYPE_ALL = (1<<3),
-	E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1),
-};
-#define E1000_TSYNCRXCFG 0x05F50
-enum {
-	E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
-	E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
-	E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
-	E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
-	E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
-
-	E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
-	E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
-	E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
-	E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
-	E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
-	E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
-	E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
-	E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
-	E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
-	E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
-};
-#define E1000_SYSTIML 0x0B600
-#define E1000_SYSTIMH 0x0B604
-#define E1000_TIMINCA 0x0B608
-
-#define E1000_RXMTRL     0x0B634
-#define E1000_RXSTMPL 0x0B624
-#define E1000_RXSTMPH 0x0B628
-#define E1000_RXSATRL 0x0B62C
-#define E1000_RXSATRH 0x0B630
-
-#define E1000_TXSTMPL 0x0B618
-#define E1000_TXSTMPH 0x0B61C
-
-#define E1000_ETQF0   0x05CB0
-#define E1000_ETQF1   0x05CB4
-#define E1000_ETQF2   0x05CB8
-#define E1000_ETQF3   0x05CBC
-#define E1000_ETQF4   0x05CC0
-#define E1000_ETQF5   0x05CC4
-#define E1000_ETQF6   0x05CC8
-#define E1000_ETQF7   0x05CCC
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
 
 /* Filtering Registers */
 #define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -143,7 +105,9 @@ enum {
 #define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
 /* Split and Replication RX Control - RW */
+#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
 /*
  * Convenience macros
  *
@@ -288,10 +252,17 @@ enum {
 #define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
 #define E1000_RA       0x05400  /* Receive Address - RW Array */
 #define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                                        (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                                        (0x054E4 + ((_i - 16) * 8)))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
 #define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
 #define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
 #define E1000_WUC      0x05800  /* Wakeup Control - RW */
@@ -331,6 +302,7 @@ enum {
 #define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
 #define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
 #define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
 #define E1000_IOVTCL    0x05BBC /* IOV Control Register */
 /* These act per VF so an array friendly macro is used */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
@@ -348,4 +320,6 @@ enum {
 #define array_rd32(reg, offset) \
 	(readl(hw->hw_addr + reg + ((offset) << 2)))
 
+/* DMA Coalescing registers */
+#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
 #endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7126fea26fec..b1c1eb88893f 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,12 +55,14 @@ struct igb_adapter;
 #define IGB_DEFAULT_ITR                    3 /* dynamic */
 #define IGB_MAX_ITR_USECS              10000
 #define IGB_MIN_ITR_USECS                 10
+#define NON_Q_VECTORS                      1
+#define MAX_Q_VECTORS                      8
 
 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES     (adapter->vfs_allocated_count ? \
-                               (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
-#define IGB_MAX_TX_QUEUES     IGB_MAX_RX_QUEUES
-#define IGB_ABS_MAX_TX_QUEUES     4
+#define IGB_MAX_RX_QUEUES                  (adapter->vfs_allocated_count ? 2 : \
+                                           (hw->mac.type > e1000_82575 ? 8 : 4))
+#define IGB_ABS_MAX_TX_QUEUES              8
+#define IGB_MAX_TX_QUEUES                  IGB_MAX_RX_QUEUES
 
 #define IGB_MAX_VF_MC_ENTRIES              30
 #define IGB_MAX_VF_FUNCTIONS               8
@@ -71,9 +73,14 @@ struct vf_data_storage {
 	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
 	u16 num_vf_mc_hashes;
 	u16 vlans_enabled;
-	bool clear_to_send;
+	u32 flags;
+	unsigned long last_nack;
 };
 
+#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
+
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
  *           descriptors available in its onboard memory.
@@ -85,17 +92,19 @@ struct vf_data_storage {
  *           descriptors until either it has this many to write back, or the
  *           ITR timer expires.
  */
-#define IGB_RX_PTHRESH                    16
+#define IGB_RX_PTHRESH                    (hw->mac.type <= e1000_82576 ? 16 : 8)
 #define IGB_RX_HTHRESH                     8
 #define IGB_RX_WTHRESH                     1
+#define IGB_TX_PTHRESH                     8
+#define IGB_TX_HTHRESH                     1
+#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
+                                             adapter->msix_entries) ? 0 : 16)
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_128   128    /* Used for packet split */
-#define IGB_RXBUFFER_256   256    /* Used for packet split */
-#define IGB_RXBUFFER_512   512
 #define IGB_RXBUFFER_1024  1024
 #define IGB_RXBUFFER_2048  2048
 #define IGB_RXBUFFER_16384 16384
@@ -128,12 +137,13 @@ struct igb_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* RX */
 		struct {
 			struct page *page;
-			u64 page_dma;
-			unsigned int page_offset;
+			dma_addr_t page_dma;
+			u16 page_offset;
 		};
 	};
 };
@@ -141,36 +151,55 @@ struct igb_buffer {
 struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
+	u64 restart_queue;
 };
 
 struct igb_rx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
 };
 
-struct igb_ring {
+struct igb_q_vector {
 	struct igb_adapter *adapter; /* backlink */
-	void *desc;                  /* descriptor ring memory */
-	dma_addr_t dma;              /* phys address of the ring */
-	unsigned int size;           /* length of desc. ring in bytes */
-	unsigned int count;          /* number of desc. in the ring */
-	u16 next_to_use;
-	u16 next_to_clean;
-	u16 head;
-	u16 tail;
-	struct igb_buffer *buffer_info; /* array of buffer info structs */
+	struct igb_ring *rx_ring;
+	struct igb_ring *tx_ring;
+	struct napi_struct napi;
 
 	u32 eims_value;
-	u32 itr_val;
-	u16 itr_register;
 	u16 cpu;
 
-	u16 queue_index;
-	u16 reg_idx;
+	u16 itr_val;
+	u8 set_itr;
+	u8 itr_shift;
+	void __iomem *itr_register;
+
+	char name[IFNAMSIZ + 9];
+};
+
+struct igb_ring {
+	struct igb_q_vector *q_vector; /* backlink to q_vector */
+	struct net_device *netdev;     /* back pointer to net_device */
+	struct pci_dev *pdev;          /* pci device for dma mapping */
+	dma_addr_t dma;                /* phys address of the ring */
+	void *desc;                    /* descriptor ring memory */
+	unsigned int size;             /* length of desc. ring in bytes */
+	u16 count;                     /* number of desc. in the ring */
+	u16 next_to_use;
+	u16 next_to_clean;
+	u8 queue_index;
+	u8 reg_idx;
+	void __iomem *head;
+	void __iomem *tail;
+	struct igb_buffer *buffer_info; /* array of buffer info structs */
+
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
+	u32 flags;
+
 	union {
 		/* TX */
 		struct {
@@ -180,16 +209,18 @@ struct igb_ring {
 		/* RX */
 		struct {
 			struct igb_rx_queue_stats rx_stats;
-			u64 rx_queue_drops;
-			struct napi_struct napi;
-			int set_itr;
-			struct igb_ring *buddy;
+			u32 rx_buffer_len;
 		};
 	};
-
-	char name[IFNAMSIZ + 5];
 };
 
+#define IGB_RING_FLAG_RX_CSUM        0x00000001 /* RX CSUM enabled */
+#define IGB_RING_FLAG_RX_SCTP_CSUM   0x00000002 /* SCTP CSUM offload enabled */
+
+#define IGB_RING_FLAG_TX_CTX_IDX     0x00000001 /* HW requires context index */
+
+#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
+
 #define E1000_RX_DESC_ADV(R, i)	    \
 	(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
 #define E1000_TX_DESC_ADV(R, i)	    \
@@ -197,6 +228,15 @@ struct igb_ring {
 #define E1000_TX_CTXTDESC_ADV(R, i)	    \
 	(&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
 
+/* igb_desc_unused - calculate if we have unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
 /* board specific private data structure */
 
 struct igb_adapter {
@@ -205,18 +245,14 @@ struct igb_adapter {
 	struct vlan_group *vlgrp;
 	u16 mng_vlan_id;
 	u32 bd_number;
-	u32 rx_buffer_len;
 	u32 wol;
 	u32 en_mng_pt;
 	u16 link_speed;
 	u16 link_duplex;
-	unsigned int total_tx_bytes;
-	unsigned int total_tx_packets;
-	unsigned int total_rx_bytes;
-	unsigned int total_rx_packets;
+
 	/* Interrupt Throttle Rate */
-	u32 itr;
-	u32 itr_setting;
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
 	u16 tx_itr;
 	u16 rx_itr;
 
@@ -229,13 +265,7 @@ struct igb_adapter {
 
 	/* TX */
 	struct igb_ring *tx_ring;      /* One per active queue */
-	unsigned int restart_queue;
 	unsigned long tx_queue_len;
-	u32 txd_cmd;
-	u32 gotc;
-	u64 gotc_old;
-	u64 tpt_old;
-	u64 colc_old;
 	u32 tx_timeout_count;
 
 	/* RX */
@@ -243,20 +273,12 @@ struct igb_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	u64 hw_csum_err;
-	u64 hw_csum_good;
-	u32 alloc_rx_buff_failed;
-	u32 gorc;
-	u64 gorc_old;
-	u16 rx_ps_hdr_size;
 	u32 max_frame_size;
 	u32 min_frame_size;
 
 	/* OS defined structs */
 	struct net_device *netdev;
-	struct napi_struct napi;
 	struct pci_dev *pdev;
-	struct net_device_stats net_stats;
 	struct cyclecounter cycles;
 	struct timecounter clock;
 	struct timecompare compare;
@@ -273,6 +295,9 @@ struct igb_adapter {
 	struct igb_ring test_rx_ring;
 
 	int msg_enable;
+
+	unsigned int num_q_vectors;
+	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
 	struct msix_entry *msix_entries;
 	u32 eims_enable_mask;
 	u32 eims_other;
@@ -283,18 +308,20 @@ struct igb_adapter {
 	u32 eeprom_wol;
 
 	struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
-	unsigned int tx_ring_count;
-	unsigned int rx_ring_count;
+	u16 tx_ring_count;
+	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
+	u32 rss_queues;
 };
 
 #define IGB_FLAG_HAS_MSI           (1 << 0)
 #define IGB_FLAG_DCA_ENABLED       (1 << 1)
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
-#define IGB_FLAG_NEED_CTX_IDX      (1 << 3)
-#define IGB_FLAG_RX_CSUM_DISABLED  (1 << 4)
+#define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
 
+#define IGB_82576_TSYNC_SHIFT 19
+#define IGB_82580_TSYNC_SHIFT 24
 enum e1000_state_t {
 	__IGB_TESTING,
 	__IGB_RESETTING,
@@ -314,10 +341,18 @@ extern void igb_down(struct igb_adapter *);
 extern void igb_reinit_locked(struct igb_adapter *);
 extern void igb_reset(struct igb_adapter *);
 extern int igb_set_spd_dplx(struct igb_adapter *, u16);
-extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
 extern void igb_free_tx_resources(struct igb_ring *);
 extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+					   struct igb_buffer *);
+extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 extern void igb_update_stats(struct igb_adapter *);
 extern void igb_set_ethtool_ops(struct net_device *);
 
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index b243ed3b0c36..ac9d5272650d 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -44,78 +44,94 @@ struct igb_stats {
 	int stat_offset;
 };
 
-#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
-		      offsetof(struct igb_adapter, m)
+#define IGB_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+	.stat_offset = offsetof(struct igb_adapter, _stat) \
+}
 static const struct igb_stats igb_gstrings_stats[] = {
-	{ "rx_packets", IGB_STAT(stats.gprc) },
-	{ "tx_packets", IGB_STAT(stats.gptc) },
-	{ "rx_bytes", IGB_STAT(stats.gorc) },
-	{ "tx_bytes", IGB_STAT(stats.gotc) },
-	{ "rx_broadcast", IGB_STAT(stats.bprc) },
-	{ "tx_broadcast", IGB_STAT(stats.bptc) },
-	{ "rx_multicast", IGB_STAT(stats.mprc) },
-	{ "tx_multicast", IGB_STAT(stats.mptc) },
-	{ "rx_errors", IGB_STAT(net_stats.rx_errors) },
-	{ "tx_errors", IGB_STAT(net_stats.tx_errors) },
-	{ "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
-	{ "multicast", IGB_STAT(stats.mprc) },
-	{ "collisions", IGB_STAT(stats.colc) },
-	{ "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
-	{ "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
-	{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
-	{ "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
-	{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
-	{ "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) },
-	{ "rx_missed_errors", IGB_STAT(stats.mpc) },
-	{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
-	{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
-	{ "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
-	{ "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
-	{ "tx_window_errors", IGB_STAT(stats.latecol) },
-	{ "tx_abort_late_coll", IGB_STAT(stats.latecol) },
-	{ "tx_deferred_ok", IGB_STAT(stats.dc) },
-	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
-	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
-	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "tx_restart_queue", IGB_STAT(restart_queue) },
-	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
-	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
-	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
-	{ "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
-	{ "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
-	{ "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
-	{ "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
-	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
-	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
-	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
-	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
-	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
-	{ "tx_smbus", IGB_STAT(stats.mgptc) },
-	{ "rx_smbus", IGB_STAT(stats.mgprc) },
-	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
+	IGB_STAT("rx_packets", stats.gprc),
+	IGB_STAT("tx_packets", stats.gptc),
+	IGB_STAT("rx_bytes", stats.gorc),
+	IGB_STAT("tx_bytes", stats.gotc),
+	IGB_STAT("rx_broadcast", stats.bprc),
+	IGB_STAT("tx_broadcast", stats.bptc),
+	IGB_STAT("rx_multicast", stats.mprc),
+	IGB_STAT("tx_multicast", stats.mptc),
+	IGB_STAT("multicast", stats.mprc),
+	IGB_STAT("collisions", stats.colc),
+	IGB_STAT("rx_crc_errors", stats.crcerrs),
+	IGB_STAT("rx_no_buffer_count", stats.rnbc),
+	IGB_STAT("rx_missed_errors", stats.mpc),
+	IGB_STAT("tx_aborted_errors", stats.ecol),
+	IGB_STAT("tx_carrier_errors", stats.tncrs),
+	IGB_STAT("tx_window_errors", stats.latecol),
+	IGB_STAT("tx_abort_late_coll", stats.latecol),
+	IGB_STAT("tx_deferred_ok", stats.dc),
+	IGB_STAT("tx_single_coll_ok", stats.scc),
+	IGB_STAT("tx_multi_coll_ok", stats.mcc),
+	IGB_STAT("tx_timeout_count", tx_timeout_count),
+	IGB_STAT("rx_long_length_errors", stats.roc),
+	IGB_STAT("rx_short_length_errors", stats.ruc),
+	IGB_STAT("rx_align_errors", stats.algnerrc),
+	IGB_STAT("tx_tcp_seg_good", stats.tsctc),
+	IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
+	IGB_STAT("rx_flow_control_xon", stats.xonrxc),
+	IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
+	IGB_STAT("tx_flow_control_xon", stats.xontxc),
+	IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+	IGB_STAT("rx_long_byte_count", stats.gorc),
+	IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+	IGB_STAT("tx_smbus", stats.mgptc),
+	IGB_STAT("rx_smbus", stats.mgprc),
+	IGB_STAT("dropped_smbus", stats.mgpdc),
+};
+
+#define IGB_NETDEV_STAT(_net_stat) { \
+	.stat_string = __stringify(_net_stat), \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
+static const struct igb_stats igb_gstrings_net_stats[] = {
+	IGB_NETDEV_STAT(rx_errors),
+	IGB_NETDEV_STAT(tx_errors),
+	IGB_NETDEV_STAT(tx_dropped),
+	IGB_NETDEV_STAT(rx_length_errors),
+	IGB_NETDEV_STAT(rx_over_errors),
+	IGB_NETDEV_STAT(rx_frame_errors),
+	IGB_NETDEV_STAT(rx_fifo_errors),
+	IGB_NETDEV_STAT(tx_fifo_errors),
+	IGB_NETDEV_STAT(tx_heartbeat_errors)
 };
 
-#define IGB_QUEUE_STATS_LEN \
-	(((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
-	  (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
-	 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
-	  (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
 #define IGB_GLOBAL_STATS_LEN	\
-	sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
-#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
+	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
+#define IGB_NETDEV_STATS_LEN	\
+	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_RX_QUEUE_STATS_LEN \
+	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+#define IGB_TX_QUEUE_STATS_LEN \
+	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+#define IGB_QUEUE_STATS_LEN \
+	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+	  IGB_RX_QUEUE_STATS_LEN) + \
+	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
+	  IGB_TX_QUEUE_STATS_LEN))
+#define IGB_STATS_LEN \
+	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+
 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)", "Eeprom test    (offline)",
 	"Interrupt test (offline)", "Loopback test  (offline)",
 	"Link test   (on/offline)"
 };
-#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
 
 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 status;
 
 	if (hw->phy.media_type == e1000_media_type_copper) {
 
@@ -150,17 +166,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	ecmd->transceiver = XCVR_INTERNAL;
 
-	if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
+	status = rd32(E1000_STATUS);
 
-		adapter->hw.mac.ops.get_speed_and_duplex(hw,
-					&adapter->link_speed,
-					&adapter->link_duplex);
-		ecmd->speed = adapter->link_speed;
+	if (status & E1000_STATUS_LU) {
 
-		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
-		 *          and HALF_DUPLEX != DUPLEX_HALF */
+		if ((status & E1000_STATUS_SPEED_1000) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->speed = SPEED_1000;
+		else if (status & E1000_STATUS_SPEED_100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
 
-		if (adapter->link_duplex == FULL_DUPLEX)
+		if ((status & E1000_STATUS_FD) ||
+		    hw->phy.media_type != e1000_media_type_copper)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -251,8 +270,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
 		if (netif_running(adapter->netdev)) {
 			igb_down(adapter);
 			igb_up(adapter);
-		} else
+		} else {
 			igb_reset(adapter);
+		}
 	} else {
 		if (pause->rx_pause && pause->tx_pause)
 			hw->fc.requested_mode = e1000_fc_full;
@@ -276,17 +296,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
 static u32 igb_get_rx_csum(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
+	return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
 }
 
 static int igb_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	int i;
 
-	if (data)
-		adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
-	else
-		adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (data)
+			adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
+		else
+			adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
+	}
 
 	return 0;
 }
@@ -302,7 +325,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data)
 
 	if (data) {
 		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == e1000_82576)
+		if (adapter->hw.mac.type >= e1000_82576)
 			netdev->features |= NETIF_F_SCTP_CSUM;
 	} else {
 		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -496,19 +519,10 @@ static void igb_get_regs(struct net_device *netdev,
 	regs_buff[119] = adapter->stats.scvpc;
 	regs_buff[120] = adapter->stats.hrmpc;
 
-	/* These should probably be added to e1000_regs.h instead */
-	#define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
-	#define E1000_IP4AT_REG(_i)   (0x05840 + ((_i) * 8))
-	#define E1000_IP6AT_REG(_i)   (0x05880 + ((_i) * 4))
-	#define E1000_WUPM_REG(_i)    (0x05A00 + ((_i) * 4))
-	#define E1000_FFMT_REG(_i)    (0x09000 + ((_i) * 8))
-	#define E1000_FFVT_REG(_i)    (0x09800 + ((_i) * 8))
-	#define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))
-
 	for (i = 0; i < 4; i++)
 		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
+		regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
 	for (i = 0; i < 4; i++)
 		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
 	for (i = 0; i < 4; i++)
@@ -733,17 +747,17 @@ static int igb_set_ringparam(struct net_device *netdev,
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *temp_ring;
 	int i, err = 0;
-	u32 new_rx_count, new_tx_count;
+	u16 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
-	new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
+	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+	new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
 	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
-	new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
+	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+	new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
 	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	if ((new_tx_count == adapter->tx_ring_count) &&
@@ -788,7 +802,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			temp_ring[i].count = new_tx_count;
-			err = igb_setup_tx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -813,7 +827,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			temp_ring[i].count = new_rx_count;
-			err = igb_setup_rx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_rx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -867,6 +881,49 @@ struct igb_reg_test {
 #define TABLE64_TEST_LO	5
 #define TABLE64_TEST_HI	6
 
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_RDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	/* RDH is read-only for 82580, only test RDT. */
+	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_TDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
+						0x83FFFFFF, 0xFFFFFFFF },
+	{ E1000_RA2,	   0, 8, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA2,	   0, 8, TABLE64_TEST_HI,
+						0x83FFFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,	   0, 128, TABLE32_TEST,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0 }
+};
+
 /* 82576 reg test */
 static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -944,7 +1001,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 pat, val;
-	u32 _test[] =
+	static const u32 _test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
 		wr32(reg, (_test[pat] & write));
@@ -957,6 +1014,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 			return 1;
 		}
 	}
+
 	return 0;
 }
 
@@ -974,6 +1032,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 		*data = reg;
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -996,14 +1055,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 	u32 value, before, after;
 	u32 i, toggle;
 
-	toggle = 0x7FFFF3FF;
-
 	switch (adapter->hw.mac.type) {
+	case e1000_82580:
+		test = reg_test_82580;
+		toggle = 0x7FEFF3FF;
+		break;
 	case e1000_82576:
 		test = reg_test_82576;
+		toggle = 0x7FFFF3FF;
 		break;
 	default:
 		test = reg_test_82575;
+		toggle = 0x7FFFF3FF;
 		break;
 	}
 
@@ -1081,8 +1144,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
 	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
-		    < 0) {
+		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -1098,8 +1160,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 
 static irqreturn_t igb_test_intr(int irq, void *data)
 {
-	struct net_device *netdev = (struct net_device *) data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->test_icr |= rd32(E1000_ICR);
@@ -1117,38 +1178,45 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if (adapter->msix_entries)
-		/* NOTE: we don't test MSI-X interrupts here, yet */
-		return 0;
-
-	if (adapter->flags & IGB_FLAG_HAS_MSI) {
+	if (adapter->msix_entries) {
+		if (request_irq(adapter->msix_entries[0].vector,
+		                igb_test_intr, 0, netdev->name, adapter)) {
+			*data = 1;
+			return -1;
+		}
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
-		if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
+		if (request_irq(irq,
+		                igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
-	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
-				netdev->name, netdev)) {
+	} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+				netdev->name, adapter)) {
 		shared_int = false;
-	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
-		 netdev->name, netdev)) {
+	} else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
+		 netdev->name, adapter)) {
 		*data = 1;
 		return -1;
 	}
 	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
 		(shared_int ? "shared" : "unshared"));
+
 	/* Disable all the interrupts */
-	wr32(E1000_IMC, 0xFFFFFFFF);
+	wr32(E1000_IMC, ~0);
 	msleep(10);
 
 	/* Define all writable bits for ICS */
-	switch(hw->mac.type) {
+	switch (hw->mac.type) {
 	case e1000_82575:
 		ics_mask = 0x37F47EDD;
 		break;
 	case e1000_82576:
 		ics_mask = 0x77D4FBFD;
 		break;
+	case e1000_82580:
+		ics_mask = 0x77DCFED5;
+		break;
 	default:
 		ics_mask = 0x7FFFFFFF;
 		break;
@@ -1232,190 +1300,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	msleep(10);
 
 	/* Unhook test interrupt handler */
-	free_irq(irq, netdev);
+	if (adapter->msix_entries)
+		free_irq(adapter->msix_entries[0].vector, adapter);
+	else
+		free_irq(irq, adapter);
 
 	return *data;
 }
 
 static void igb_free_desc_rings(struct igb_adapter *adapter)
 {
-	struct igb_ring *tx_ring = &adapter->test_tx_ring;
-	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i;
-
-	if (tx_ring->desc && tx_ring->buffer_info) {
-		for (i = 0; i < tx_ring->count; i++) {
-			struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma, buf->length,
-						 PCI_DMA_TODEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (rx_ring->desc && rx_ring->buffer_info) {
-		for (i = 0; i < rx_ring->count; i++) {
-			struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma,
-						 IGB_RXBUFFER_2048,
-						 PCI_DMA_FROMDEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (tx_ring->desc) {
-		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
-				    tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-	if (rx_ring->desc) {
-		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
-				    rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-
-	kfree(tx_ring->buffer_info);
-	tx_ring->buffer_info = NULL;
-	kfree(rx_ring->buffer_info);
-	rx_ring->buffer_info = NULL;
-
-	return;
+	igb_free_tx_resources(&adapter->test_tx_ring);
+	igb_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int igb_setup_desc_rings(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	struct igb_buffer *buffer_info;
-	u32 rctl;
-	int i, ret_val;
+	struct e1000_hw *hw = &adapter->hw;
+	int ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = IGB_DEFAULT_TXD;
+	tx_ring->pdev = adapter->pdev;
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->vfs_allocated_count;
 
-	if (!tx_ring->count)
-		tx_ring->count = IGB_DEFAULT_TXD;
-
-	tx_ring->buffer_info = kcalloc(tx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!tx_ring->buffer_info) {
+	if (igb_setup_tx_resources(tx_ring)) {
 		ret_val = 1;
 		goto err_nomem;
 	}
 
-	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
-	if (!tx_ring->desc) {
-		ret_val = 2;
-		goto err_nomem;
-	}
-	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
-	wr32(E1000_TDBAL(0),
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
-	wr32(E1000_TDLEN(0),
-			tx_ring->count * sizeof(union e1000_adv_tx_desc));
-	wr32(E1000_TDH(0), 0);
-	wr32(E1000_TDT(0), 0);
-	wr32(E1000_TCTL,
-			E1000_TCTL_PSP | E1000_TCTL_EN |
-			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
-			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
-
-	for (i = 0; i < tx_ring->count; i++) {
-		union e1000_adv_tx_desc *tx_desc;
-		struct sk_buff *skb;
-		unsigned int size = 1024;
-
-		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
-		skb = alloc_skb(size, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 3;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		buffer_info = &tx_ring->buffer_info[i];
-		buffer_info->skb = skb;
-		buffer_info->length = skb->len;
-		buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
-		                                  PCI_DMA_TODEVICE);
-		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
-		                              E1000_ADVTXD_PAYLEN_SHIFT;
-		tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
-		                                          E1000_TXD_CMD_IFCS |
-		                                          E1000_TXD_CMD_RS |
-		                                          E1000_ADVTXD_DTYP_DATA |
-		                                          E1000_ADVTXD_DCMD_DEXT);
-	}
+	igb_setup_tctl(adapter);
+	igb_configure_tx_ring(adapter, tx_ring);
 
 	/* Setup Rx descriptor ring and Rx buffers */
-
-	if (!rx_ring->count)
-		rx_ring->count = IGB_DEFAULT_RXD;
-
-	rx_ring->buffer_info = kcalloc(rx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!rx_ring->buffer_info) {
-		ret_val = 4;
+	rx_ring->count = IGB_DEFAULT_RXD;
+	rx_ring->pdev = adapter->pdev;
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+	rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+	if (igb_setup_rx_resources(rx_ring)) {
+		ret_val = 3;
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
-	if (!rx_ring->desc) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+	/* set the default queue to queue 0 of PF */
+	wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
 
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wr32(E1000_RDBAL(0),
-			((u64) rx_ring->dma & 0xFFFFFFFF));
-	wr32(E1000_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	wr32(E1000_RDLEN(0), rx_ring->size);
-	wr32(E1000_RDH(0), 0);
-	wr32(E1000_RDT(0), 0);
-	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
-		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-	wr32(E1000_RCTL, rctl);
-	wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
-
-	for (i = 0; i < rx_ring->count; i++) {
-		union e1000_adv_rx_desc *rx_desc;
-		struct sk_buff *skb;
-
-		buffer_info = &rx_ring->buffer_info[i];
-		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-		skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
-				GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		buffer_info->skb = skb;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-		                                  IGB_RXBUFFER_2048,
-		                                  PCI_DMA_FROMDEVICE);
-		rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
-		memset(skb->data, 0x00, skb->len);
-	}
+	/* enable receive ring */
+	igb_setup_rctl(adapter);
+	igb_configure_rx_ring(adapter, rx_ring);
+
+	igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
 
 	return 0;
 
@@ -1449,6 +1388,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 		igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
 		/* autoneg off */
 		igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+	} else if (hw->phy.type == e1000_phy_82580) {
+		/* enable MII loopback */
+		igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
 	}
 
 	ctrl_reg = rd32(E1000_CTRL);
@@ -1491,7 +1433,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg;
 
-	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+	reg = rd32(E1000_CTRL_EXT);
+
+	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
+	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
 		reg = rd32(E1000_RCTL);
 		reg |= E1000_RCTL_LBM_TCVR;
 		wr32(E1000_RCTL, reg);
@@ -1522,11 +1467,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 		wr32(E1000_PCS_LCTL, reg);
 
 		return 0;
-	} else if (hw->phy.media_type == e1000_media_type_copper) {
-		return igb_set_phy_loopback(adapter);
 	}
 
-	return 7;
+	return igb_set_phy_loopback(adapter);
 }
 
 static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1552,35 +1495,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
 				    unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
-	frame_size &= ~1;
-	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
-	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
-	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+	frame_size /= 2;
+	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+	memset(&skb->data[frame_size + 10], 0xBE, 1);
+	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-	frame_size &= ~1;
-	if (*(skb->data + 3) == 0xFF)
-		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		   (*(skb->data + frame_size / 2 + 12) == 0xAF))
+	frame_size /= 2;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size + 10) == 0xBE) &&
+		   (*(skb->data + frame_size + 12) == 0xAF)) {
 			return 0;
+		}
+	}
 	return 13;
 }
 
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+                                struct igb_ring *tx_ring,
+                                unsigned int size)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_buffer *buffer_info;
+	int rx_ntc, tx_ntc, count = 0;
+	u32 staterr;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		/* check rx buffer */
+		buffer_info = &rx_ring->buffer_info[rx_ntc];
+
+		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
+		pci_unmap_single(rx_ring->pdev,
+		                 buffer_info->dma,
+	                 rx_ring->rx_buffer_len,
+	                 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!igb_check_lbtest_frame(buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on tx side */
+		buffer_info = &tx_ring->buffer_info[tx_ntc];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+
+		/* increment rx/tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	igb_alloc_rx_buffers_adv(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt;
-	int ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
 
-	wr32(E1000_RDT(0), rx_ring->count - 1);
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	/* Calculate the loop count based on the largest descriptor ring
+	/* place data into test skb */
+	igb_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
+
+	/*
+	 * Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
@@ -1590,50 +1597,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) { /* loop count loop */
-		for (i = 0; i < 64; i++) { /* send the packets */
-			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
-						1024);
-			pci_dma_sync_single_for_device(pdev,
-				tx_ring->buffer_info[k].dma,
-				tx_ring->buffer_info[k].length,
-				PCI_DMA_TODEVICE);
-			k++;
-			if (k == tx_ring->count)
-				k = 0;
-		}
-		wr32(E1000_TDT(0), k);
-		msleep(200);
-		time = jiffies; /* set the start time for the receive */
+		/* reset count of good packets */
 		good_cnt = 0;
-		do { /* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
-					rx_ring->buffer_info[l].dma,
-					IGB_RXBUFFER_2048,
-					PCI_DMA_FROMDEVICE);
-
-			ret_val = igb_check_lbtest_frame(
-					     rx_ring->buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue */
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			l++;
-			if (l == rx_ring->count)
-				l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			ret_val = 13; /* ret_val is the same as mis-compare */
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			ret_val = 14; /* error code for time out error */
+
+		/* allow 200 milliseconds for packets to go from tx to rx */
+		msleep(200);
+
+		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	} /* end loop count loop */
+
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
@@ -1686,8 +1679,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 		if (hw->mac.autoneg)
 			msleep(4000);
 
-		if (!(rd32(E1000_STATUS) &
-		      E1000_STATUS_LU))
+		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
 			*data = 1;
 	}
 	return *data;
@@ -1869,7 +1861,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		adapter->wol |= E1000_WUFC_BC;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= E1000_WUFC_MAG;
-
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
 	return 0;
@@ -1882,12 +1873,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long timeout;
 
-	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
-		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+	timeout = data * 1000;
+
+	/*
+	 * msleep_interruptible only accepts an unsigned int, so we are
+	 * limited in how long a duration we can wait
+	 */
+	if (!timeout || timeout > UINT_MAX)
+		timeout = UINT_MAX;
 
 	igb_blink_led(hw);
-	msleep_interruptible(data * 1000);
+	msleep_interruptible(timeout);
 
 	igb_led_off(hw);
 	clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1900,7 +1898,6 @@ static int igb_set_coalesce(struct net_device *netdev,
 			    struct ethtool_coalesce *ec)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1909,17 +1906,39 @@ static int igb_set_coalesce(struct net_device *netdev,
 	    (ec->rx_coalesce_usecs == 2))
 		return -EINVAL;
 
+	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+	    ((ec->tx_coalesce_usecs > 3) &&
+	     (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+	    (ec->tx_coalesce_usecs == 2))
+		return -EINVAL;
+
+	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+		return -EINVAL;
+
 	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
-		adapter->itr_setting = ec->rx_coalesce_usecs;
-		adapter->itr = IGB_START_ITR;
-	} else {
-		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
-		adapter->itr = adapter->itr_setting;
-	}
+	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+	else
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(adapter->rx_ring[i].itr_register, adapter->itr);
+	/* convert to rate of irq's per second */
+	if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+		adapter->tx_itr_setting = adapter->rx_itr_setting;
+	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+	else
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		if (q_vector->rx_ring)
+			q_vector->itr_val = adapter->rx_itr_setting;
+		else
+			q_vector->itr_val = adapter->tx_itr_setting;
+		if (q_vector->itr_val && q_vector->itr_val <= 3)
+			q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+	}
 
 	return 0;
 }
@@ -1929,15 +1948,21 @@ static int igb_get_coalesce(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->itr_setting <= 3)
-		ec->rx_coalesce_usecs = adapter->itr_setting;
+	if (adapter->rx_itr_setting <= 3)
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
 	else
-		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+		if (adapter->tx_itr_setting <= 3)
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+		else
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+	}
 
 	return 0;
 }
 
-
 static int igb_nway_reset(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1962,31 +1987,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 				  struct ethtool_stats *stats, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
 	u64 *queue_stat;
-	int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
-	int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
-	int j;
-	int i;
+	int i, j, k;
+	char *p;
 
 	igb_update_stats(adapter);
+
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
+		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
 		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
+	for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
+		p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
+		data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		int k;
 		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
-		for (k = 0; k < stat_count_tx; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+			data[i] = queue_stat[k];
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		int k;
 		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
-		for (k = 0; k < stat_count_rx; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+			data[i] = queue_stat[k];
 	}
 }
 
@@ -2007,11 +2033,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
+			memcpy(p, igb_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			sprintf(p, "tx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_restart", i);
+			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
@@ -2020,6 +2053,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_drops", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_alloc_failed", i);
+			p += ETH_GSTRING_LEN;
 		}
 /*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 714c3a4a44ef..16349ba68736 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -49,7 +49,7 @@
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "1.3.16-k2"
+#define DRV_VERSION "2.1.0-k2"
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
@@ -61,8 +61,14 @@ static const struct e1000_info *igb_info_tbl[] = {
 };
 
 static struct pci_device_id igb_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
@@ -81,6 +87,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -89,7 +96,6 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
@@ -98,28 +104,22 @@ static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-					   struct net_device *,
-					   struct igb_ring *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-				      struct net_device *);
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_rx(int irq, void *);
-static irqreturn_t igb_msix_tx(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -127,57 +127,13 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
 static void igb_vlan_rx_add_vid(struct net_device *, u16);
 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
 static void igb_msg_task(struct igb_adapter *);
-static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
-static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
 static void igb_vmm_control(struct igb_adapter *);
-static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
+static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 
-static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
-{
-	u32 reg_data;
-
-	reg_data = rd32(E1000_VMOLR(vfn));
-	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
-	            E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
-	            E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
-	            E1000_VMOLR_AUPE |   /* Accept untagged packets */
-	            E1000_VMOLR_STRVLAN; /* Strip vlan tags */
-	wr32(E1000_VMOLR(vfn), reg_data);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                 int vfn)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u32 vmolr;
-
-	/* if it isn't the PF check to see if VFs are enabled and
-	 * increase the size to support vlan tags */
-	if (vfn < adapter->vfs_allocated_count &&
-	    adapter->vf_data[vfn].vlans_enabled)
-		size += VLAN_TAG_SIZE;
-
-	vmolr = rd32(E1000_VMOLR(vfn));
-	vmolr &= ~E1000_VMOLR_RLPML_MASK;
-	vmolr |= size | E1000_VMOLR_LPE;
-	wr32(E1000_VMOLR(vfn), vmolr);
-
-	return 0;
-}
-
-static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
-{
-	u32 reg_data;
-
-	reg_data = rd32(E1000_RAH(entry));
-	reg_data &= ~E1000_RAH_POOL_MASK;
-	reg_data |= E1000_RAH_POOL_1 << pool;;
-	wr32(E1000_RAH(entry), reg_data);
-}
-
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
@@ -228,46 +184,12 @@ static struct pci_driver igb_driver = {
 	.err_handler = &igb_err_handler
 };
 
-static int global_quad_port_a; /* global quad port a indication */
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 /**
- * Scale the NIC clock cycle by a large factor so that
- * relatively small clock corrections can be added or
- * substracted at each clock tick. The drawbacks of a
- * large factor are a) that the clock register overflows
- * more quickly (not such a big deal) and b) that the
- * increment per tick has to fit into 24 bits.
- *
- * Note that
- *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
- *             IGB_TSYNC_SCALE
- *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
- *
- * The base scale factor is intentionally a power of two
- * so that the division in %struct timecounter can be done with
- * a shift.
- */
-#define IGB_TSYNC_SHIFT (19)
-#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
-
-/**
- * The duration of one clock cycle of the NIC.
- *
- * @todo This hard-coded value is part of the specification and might change
- * in future hardware revisions. Add revision check.
- */
-#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
-
-#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
-# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
-#endif
-
-/**
  * igb_read_clock - read raw cycle counter (to be used by time counter)
  */
 static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -275,11 +197,21 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
 	struct igb_adapter *adapter =
 		container_of(tc, struct igb_adapter, cycles);
 	struct e1000_hw *hw = &adapter->hw;
-	u64 stamp;
+	u64 stamp = 0;
+	int shift = 0;
 
-	stamp =  rd32(E1000_SYSTIML);
-	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
+	/*
+	 * The timestamp latches on lowest register read. For the 82580
+	 * the lowest register is SYSTIMR instead of SYSTIML.  However, we never
+	 * adjusted TIMINCA, so SYSTIMR will just read as all 0s; ignore it.
+	 */
+	if (hw->mac.type == e1000_82580) {
+		stamp = rd32(E1000_SYSTIMR) >> 8;
+		shift = IGB_82580_TSYNC_SHIFT;
+	}
 
+	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
+	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
 	return stamp;
 }
 
@@ -320,17 +252,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
 #endif
 
 /**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-	if (ring->next_to_clean > ring->next_to_use)
-		return ring->next_to_clean - ring->next_to_use - 1;
-
-	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/**
  * igb_init_module - Driver Registration Routine
  *
  * igb_init_module is the first routine called when the driver is
@@ -344,12 +265,9 @@ static int __init igb_init_module(void)
 
 	printk(KERN_INFO "%s\n", igb_copyright);
 
-	global_quad_port_a = 0;
-
 #ifdef CONFIG_IGB_DCA
 	dca_register_notify(&dca_notifier);
 #endif
-
 	ret = pci_register_driver(&igb_driver);
 	return ret;
 }
@@ -382,8 +300,8 @@ module_exit(igb_exit_module);
  **/
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
-	int i;
-	unsigned int rbase_offset = adapter->vfs_allocated_count;
+	int i = 0, j = 0;
+	u32 rbase_offset = adapter->vfs_allocated_count;
 
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
@@ -392,23 +310,37 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 		 * In order to avoid collision we start at the first free queue
 		 * and continue consuming queues in the same sequence
 		 */
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = rbase_offset +
-			                              Q_IDX_82576(i);
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = rbase_offset +
-			                              Q_IDX_82576(i);
-		break;
+		if (adapter->vfs_allocated_count) {
+			for (; i < adapter->rss_queues; i++)
+				adapter->rx_ring[i].reg_idx = rbase_offset +
+				                              Q_IDX_82576(i);
+			for (; j < adapter->rss_queues; j++)
+				adapter->tx_ring[j].reg_idx = rbase_offset +
+				                              Q_IDX_82576(j);
+		}
 	case e1000_82575:
+	case e1000_82580:
 	default:
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = i;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = i;
+		for (; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = rbase_offset + i;
+		for (; j < adapter->num_tx_queues; j++)
+			adapter->tx_ring[j].reg_idx = rbase_offset + j;
 		break;
 	}
 }
 
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	adapter->tx_ring = NULL;
+	adapter->rx_ring = NULL;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
@@ -423,59 +355,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
-		return -ENOMEM;
+		goto err;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
-		return -ENOMEM;
-	}
-
-	adapter->rx_ring->buddy = adapter->tx_ring;
+	if (!adapter->rx_ring)
+		goto err;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
 		ring->count = adapter->tx_ring_count;
-		ring->adapter = adapter;
 		ring->queue_index = i;
+		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
 	}
+
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->count = adapter->rx_ring_count;
-		ring->adapter = adapter;
 		ring->queue_index = i;
-		ring->itr_register = E1000_ITR;
-
-		/* set a default napi handler for each rx_ring */
-		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
+		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
+		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+		/* set flag indicating ring supports SCTP checksum offload */
+		if (adapter->hw.mac.type >= e1000_82576)
+			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
 	}
 
 	igb_cache_ring_register(adapter);
-	return 0;
-}
-
-static void igb_free_queues(struct igb_adapter *adapter)
-{
-	int i;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		netif_napi_del(&adapter->rx_ring[i].napi);
+	return 0;
 
-	adapter->num_rx_queues = 0;
-	adapter->num_tx_queues = 0;
+err:
+	igb_free_queues(adapter);
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
+	return -ENOMEM;
 }
 
 #define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
-			      int tx_queue, int msix_vector)
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
 	u32 msixbm = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ivar, index;
+	int rx_queue = IGB_N0_QUEUE;
+	int tx_queue = IGB_N0_QUEUE;
+
+	if (q_vector->rx_ring)
+		rx_queue = q_vector->rx_ring->reg_idx;
+	if (q_vector->tx_ring)
+		tx_queue = q_vector->tx_ring->reg_idx;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -483,16 +417,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
 		   or more queues to a vector, we write the appropriate bits
 		   into the MSIXBM register for that vector. */
-		if (rx_queue > IGB_N0_QUEUE) {
+		if (rx_queue > IGB_N0_QUEUE)
 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-			adapter->rx_ring[rx_queue].eims_value = msixbm;
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
+		if (tx_queue > IGB_N0_QUEUE)
 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-			adapter->tx_ring[tx_queue].eims_value =
-				  E1000_EICR_TX_QUEUE0 << tx_queue;
-		}
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
 		break;
 	case e1000_82576:
 		/* 82576 uses a table-based method for assigning vectors.
@@ -500,7 +430,40 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		   a vector number along with a "valid" bit.  Sadly, the layout
 		   of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (rx_queue & 0x7);
+			ivar = array_rd32(E1000_IVAR0, index);
+			if (rx_queue < 8) {
+				/* vector goes into low byte of register */
+				ivar = ivar & 0xFFFFFF00;
+				ivar |= msix_vector | E1000_IVAR_VALID;
+			} else {
+				/* vector goes into third byte of register */
+				ivar = ivar & 0xFF00FFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+			}
+			array_wr32(E1000_IVAR0, index, ivar);
+		}
+		if (tx_queue > IGB_N0_QUEUE) {
+			index = (tx_queue & 0x7);
+			ivar = array_rd32(E1000_IVAR0, index);
+			if (tx_queue < 8) {
+				/* vector goes into second byte of register */
+				ivar = ivar & 0xFFFF00FF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+			} else {
+				/* vector goes into high byte of register */
+				ivar = ivar & 0x00FFFFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+			}
+			array_wr32(E1000_IVAR0, index, ivar);
+		}
+		q_vector->eims_value = 1 << msix_vector;
+		break;
+	case e1000_82580:
+		/* 82580 uses the same table-based approach as 82576 but has fewer
+		   entries; as a result we carry over for queues greater than 4. */
+		if (rx_queue > IGB_N0_QUEUE) {
+			index = (rx_queue >> 1);
 			ivar = array_rd32(E1000_IVAR0, index);
 			if (rx_queue & 0x1) {
 				/* vector goes into third byte of register */
@@ -511,11 +474,10 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 				ivar = ivar & 0xFFFFFF00;
 				ivar |= msix_vector | E1000_IVAR_VALID;
 			}
-			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (tx_queue >> 1);
 			ivar = array_rd32(E1000_IVAR0, index);
 			if (tx_queue & 0x1) {
 				/* vector goes into high byte of register */
@@ -526,9 +488,9 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 				ivar = ivar & 0xFFFF00FF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
 			}
-			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
+		q_vector->eims_value = 1 << msix_vector;
 		break;
 	default:
 		BUG();
@@ -549,43 +511,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->eims_enable_mask = 0;
-	if (hw->mac.type == e1000_82576)
-		/* Turn on MSI-X capability first, or our settings
-		 * won't stick.  And it will take days to debug. */
-		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-				   E1000_GPIE_PBA | E1000_GPIE_EIAME |
- 				   E1000_GPIE_NSICR);
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
-		adapter->eims_enable_mask |= tx_ring->eims_value;
-		if (tx_ring->itr_val)
-			writel(tx_ring->itr_val,
-			       hw->hw_addr + tx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + tx_ring->itr_register);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		rx_ring->buddy = NULL;
-		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
-		adapter->eims_enable_mask |= rx_ring->eims_value;
-		if (rx_ring->itr_val)
-			writel(rx_ring->itr_val,
-			       hw->hw_addr + rx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + rx_ring->itr_register);
-	}
-
 
 	/* set vector for other causes, i.e. link changes */
 	switch (hw->mac.type) {
 	case e1000_82575:
-		array_wr32(E1000_MSIXBM(0), vector++,
-				      E1000_EIMS_OTHER);
-
 		tmp = rd32(E1000_CTRL_EXT);
 		/* enable MSI-X PBA support*/
 		tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -595,22 +524,41 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 		tmp |= E1000_CTRL_EXT_IRCA;
 
 		wr32(E1000_CTRL_EXT, tmp);
-		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+		/* enable msix_other interrupt */
+		array_wr32(E1000_MSIXBM(0), vector++,
+		                      E1000_EIMS_OTHER);
 		adapter->eims_other = E1000_EIMS_OTHER;
 
 		break;
 
 	case e1000_82576:
+	case e1000_82580:
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug. */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		                E1000_GPIE_NSICR);
+
+		/* enable msix_other interrupt */
+		adapter->eims_other = 1 << vector;
 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
-		wr32(E1000_IVAR_MISC, tmp);
 
-		adapter->eims_enable_mask = (1 << (vector)) - 1;
-		adapter->eims_other = 1 << (vector - 1);
+		wr32(E1000_IVAR_MISC, tmp);
 		break;
 	default:
 		/* do nothing, since nothing else supports MSI-X */
 		break;
 	} /* switch (hw->mac.type) */
+
+	adapter->eims_enable_mask |= adapter->eims_other;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		igb_assign_vector(q_vector, vector++);
+		adapter->eims_enable_mask |= q_vector->eims_value;
+	}
+
 	wrfl();
 }
 
@@ -623,43 +571,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 static int igb_request_msix(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	int i, err = 0, vector = 0;
 
-	vector = 0;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &(adapter->tx_ring[i]);
-		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_tx, 0, ring->name,
-				  &(adapter->tx_ring[i]));
-		if (err)
-			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = 976; /* ~4000 ints/sec */
-		vector++;
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
-		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
+	err = request_irq(adapter->msix_entries[vector].vector,
+	                  igb_msix_other, 0, netdev->name, adapter);
+	if (err)
+		goto out;
+	vector++;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+		if (q_vector->rx_ring && q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+			        q_vector->rx_ring->queue_index);
+		else if (q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+			        q_vector->tx_ring->queue_index);
+		else if (q_vector->rx_ring)
+			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+			        q_vector->rx_ring->queue_index);
 		else
-			memcpy(ring->name, netdev->name, IFNAMSIZ);
+			sprintf(q_vector->name, "%s-unused", netdev->name);
+
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_rx, 0, ring->name,
-				  &(adapter->rx_ring[i]));
+		                  igb_msix_ring, 0, q_vector->name,
+		                  q_vector);
 		if (err)
 			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = adapter->itr;
 		vector++;
 	}
 
-	err = request_irq(adapter->msix_entries[vector].vector,
-			  &igb_msix_other, 0, netdev->name, netdev);
-	if (err)
-		goto out;
-
 	igb_configure_msix(adapter);
 	return 0;
 out:
@@ -672,11 +617,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
 		pci_disable_msix(adapter->pdev);
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
-	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		pci_disable_msi(adapter->pdev);
-	return;
+	}
+}
+
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+		adapter->q_vector[v_idx] = NULL;
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+	}
+	adapter->num_q_vectors = 0;
 }
 
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+	igb_free_queues(adapter);
+	igb_free_q_vectors(adapter);
+	igb_reset_interrupt_capability(adapter);
+}
 
 /**
  * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -690,11 +668,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int numvecs, i;
 
 	/* Number of supported queues. */
-	/* Having more queues than CPUs doesn't make sense. */
-	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
-	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
+	adapter->num_rx_queues = adapter->rss_queues;
+	adapter->num_tx_queues = adapter->rss_queues;
+
+	/* start with one vector for every rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if tx handler is separate, add 1 for every tx queue */
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+		numvecs += adapter->num_tx_queues;
+
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
 
-	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+	/* add 1 vector for link status interrupts */
+	numvecs++;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
 	if (!adapter->msix_entries)
@@ -728,8 +716,12 @@ msi_only:
 		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
 	}
 #endif
+	adapter->vfs_allocated_count = 0;
+	adapter->rss_queues = 1;
+	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+	adapter->num_q_vectors = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
@@ -739,6 +731,143 @@ out:
 }
 
 /**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+	struct igb_q_vector *q_vector;
+	struct e1000_hw *hw = &adapter->hw;
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+		q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
+		adapter->q_vector[v_idx] = q_vector;
+	}
+	return 0;
+
+err_out:
+	while (v_idx) {
+		v_idx--;
+		q_vector = adapter->q_vector[v_idx];
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+		adapter->q_vector[v_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+	q_vector->rx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->rx_itr_setting;
+	if (q_vector->itr_val && q_vector->itr_val <= 3)
+		q_vector->itr_val = IGB_START_ITR;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+	q_vector->tx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->tx_itr_setting;
+	if (q_vector->itr_val && q_vector->itr_val <= 3)
+		q_vector->itr_val = IGB_START_ITR;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+	int i;
+	int v_idx = 0;
+
+	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+	    (adapter->num_q_vectors < adapter->num_tx_queues))
+		return -ENOMEM;
+
+	if (adapter->num_q_vectors >=
+	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			if (i < adapter->num_tx_queues)
+				igb_map_tx_ring_to_vector(adapter, i, v_idx);
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		}
+		for (; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	}
+	return 0;
+}
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	igb_set_interrupt_capability(adapter);
+
+	err = igb_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = igb_alloc_queues(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	err = igb_map_ring_to_vector(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+		goto err_map_queues;
+	}
+
+
+	return 0;
+err_map_queues:
+	igb_free_queues(adapter);
+err_alloc_queues:
+	igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	igb_reset_interrupt_capability(adapter);
+	return err;
+}
+
+/**
  * igb_request_irq - initialize interrupts
  *
  * Attempts to configure interrupts using the best available
@@ -747,6 +876,7 @@ out:
 static int igb_request_irq(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	int err = 0;
 
@@ -755,19 +885,38 @@ static int igb_request_irq(struct igb_adapter *adapter)
 		if (!err)
 			goto request_done;
 		/* fall back to MSI */
-		igb_reset_interrupt_capability(adapter);
+		igb_clear_interrupt_scheme(adapter);
 		if (!pci_enable_msi(adapter->pdev))
 			adapter->flags |= IGB_FLAG_HAS_MSI;
 		igb_free_all_tx_resources(adapter);
 		igb_free_all_rx_resources(adapter);
+		adapter->num_tx_queues = 1;
 		adapter->num_rx_queues = 1;
-		igb_alloc_queues(adapter);
+		adapter->num_q_vectors = 1;
+		err = igb_alloc_q_vectors(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+			        "Unable to allocate memory for vectors\n");
+			goto request_done;
+		}
+		err = igb_alloc_queues(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+			        "Unable to allocate memory for queues\n");
+			igb_free_q_vectors(adapter);
+			goto request_done;
+		}
+		igb_setup_all_tx_resources(adapter);
+		igb_setup_all_rx_resources(adapter);
 	} else {
 		switch (hw->mac.type) {
 		case e1000_82575:
 			wr32(E1000_MSIXBM(0),
-			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
+			     (E1000_EICR_RX_QUEUE0 |
+			      E1000_EICR_TX_QUEUE0 |
+			      E1000_EIMS_OTHER));
 			break;
+		case e1000_82580:
 		case e1000_82576:
 			wr32(E1000_IVAR0, E1000_IVAR_VALID);
 			break;
@@ -777,17 +926,18 @@ static int igb_request_irq(struct igb_adapter *adapter)
 	}
 
 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
-		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
-				  netdev->name, netdev);
+		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
+				  netdev->name, adapter);
 		if (!err)
 			goto request_done;
+
 		/* fall back to legacy interrupts */
 		igb_reset_interrupt_capability(adapter);
 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
 	}
 
-	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
-			  netdev->name, netdev);
+	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
+			  netdev->name, adapter);
 
 	if (err)
 		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -799,23 +949,19 @@ request_done:
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->msix_entries) {
 		int vector = 0, i;
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				&(adapter->tx_ring[i]));
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				&(adapter->rx_ring[i]));
+		free_irq(adapter->msix_entries[vector++].vector, adapter);
 
-		free_irq(adapter->msix_entries[vector++].vector, netdev);
-		return;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			free_irq(adapter->msix_entries[vector++].vector,
+			         q_vector);
+		}
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
 	}
-
-	free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -826,6 +972,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
+	/*
+	 * we need to be careful when disabling interrupts.  The VFs are also
+	 * mapped into these registers, so clearing the bits can cause
+	 * issues for the VF drivers; we only need to clear what we set
+	 */
 	if (adapter->msix_entries) {
 		u32 regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -849,41 +1000,47 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (adapter->msix_entries) {
+		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
 		u32 regval = rd32(E1000_EIAC);
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
 		wr32(E1000_EIMS, adapter->eims_enable_mask);
-		if (adapter->vfs_allocated_count)
+		if (adapter->vfs_allocated_count) {
 			wr32(E1000_MBVFIMR, 0xFF);
-		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
-		                 E1000_IMS_DOUTSYNC));
+			ims |= E1000_IMS_VMMB;
+		}
+		if (adapter->hw.mac.type == e1000_82580)
+			ims |= E1000_IMS_DRSTA;
+
+		wr32(E1000_IMS, ims);
 	} else {
-		wr32(E1000_IMS, IMS_ENABLE_MASK);
-		wr32(E1000_IAM, IMS_ENABLE_MASK);
+		wr32(E1000_IMS, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
+		wr32(E1000_IAM, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
 	}
 }
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	u16 vid = adapter->hw.mng_cookie.vlan_id;
 	u16 old_vid = adapter->mng_vlan_id;
-	if (adapter->vlgrp) {
-		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
-			if (adapter->hw.mng_cookie.status &
-				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
-				igb_vlan_rx_add_vid(netdev, vid);
-				adapter->mng_vlan_id = vid;
-			} else
-				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
 
-			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
-					(vid != old_vid) &&
-			    !vlan_group_get_device(adapter->vlgrp, old_vid))
-				igb_vlan_rx_kill_vid(netdev, old_vid);
-		} else
-			adapter->mng_vlan_id = vid;
+	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		/* add VID to filter table */
+		igb_vfta_set(hw, vid, true);
+		adapter->mng_vlan_id = vid;
+	} else {
+		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+	}
+
+	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+	    (vid != old_vid) &&
+	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+		/* remove VID from filter table */
+		igb_vfta_set(hw, old_vid, false);
 	}
 }
 
@@ -907,7 +1064,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 }
 
-
 /**
  * igb_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
@@ -942,8 +1098,11 @@ static void igb_configure(struct igb_adapter *adapter)
 
 	igb_restore_vlan(adapter);
 
-	igb_configure_tx(adapter);
+	igb_setup_tctl(adapter);
+	igb_setup_mrqc(adapter);
 	igb_setup_rctl(adapter);
+
+	igb_configure_tx(adapter);
 	igb_configure_rx(adapter);
 
 	igb_rx_fifo_flush_82575(&adapter->hw);
@@ -965,7 +1124,6 @@ static void igb_configure(struct igb_adapter *adapter)
  * igb_up - Open the interface and prepare it to handle traffic
  * @adapter: board private structure
  **/
-
 int igb_up(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -976,30 +1134,37 @@ int igb_up(struct igb_adapter *adapter)
 
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_enable(&q_vector->napi);
+	}
 	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
 
-	igb_vmm_control(adapter);
-	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
-	igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
 	igb_irq_enable(adapter);
 
+	/* notify VFs that reset has been completed */
+	if (adapter->vfs_allocated_count) {
+		u32 reg_data = rd32(E1000_CTRL_EXT);
+		reg_data |= E1000_CTRL_EXT_PFRSTD;
+		wr32(E1000_CTRL_EXT, reg_data);
+	}
+
 	netif_tx_start_all_queues(adapter->netdev);
 
-	/* Fire a link change interrupt to start the watchdog. */
-	wr32(E1000_ICS, E1000_ICS_LSC);
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
+
 	return 0;
 }
 
 void igb_down(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl, rctl;
 	int i;
 
@@ -1022,8 +1187,10 @@ void igb_down(struct igb_adapter *adapter)
 	wrfl();
 	msleep(10);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_disable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_disable(&q_vector->napi);
+	}
 
 	igb_irq_disable(adapter);
 
@@ -1062,6 +1229,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 
 void igb_reset(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_mac_info *mac = &hw->mac;
 	struct e1000_fc_info *fc = &hw->fc;
@@ -1072,8 +1240,13 @@ void igb_reset(struct igb_adapter *adapter)
 	 * To take effect CTRL.RST is required.
 	 */
 	switch (mac->type) {
+	case e1000_82580:
+		pba = rd32(E1000_RXPBS);
+		pba = igb_rxpbs_adjust_82580(pba);
+		break;
 	case e1000_82576:
-		pba = E1000_PBA_64K;
+		pba = rd32(E1000_RXPBS);
+		pba &= E1000_RXPBS_SIZE_MASK_82576;
 		break;
 	case e1000_82575:
 	default:
@@ -1148,10 +1321,10 @@ void igb_reset(struct igb_adapter *adapter)
 	if (adapter->vfs_allocated_count) {
 		int i;
 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
-			adapter->vf_data[i].clear_to_send = false;
+			adapter->vf_data[i].flags = 0;
 
 		/* ping all the active vfs to let them know we are going down */
-			igb_ping_all_vfs(adapter);
+		igb_ping_all_vfs(adapter);
 
 		/* disable transmits and receives */
 		wr32(E1000_VFRE, 0);
@@ -1159,23 +1332,28 @@ void igb_reset(struct igb_adapter *adapter)
 	}
 
 	/* Allow time for pending master requests to run */
-	adapter->hw.mac.ops.reset_hw(&adapter->hw);
+	hw->mac.ops.reset_hw(hw);
 	wr32(E1000_WUC, 0);
 
-	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
-		dev_err(&adapter->pdev->dev, "Hardware Error\n");
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&pdev->dev, "Hardware Error\n");
 
+	if (hw->mac.type == e1000_82580) {
+		u32 reg = rd32(E1000_PCIEMISC);
+		wr32(E1000_PCIEMISC,
+		                reg & ~E1000_PCIEMISC_LX_DECISION);
+	}
 	igb_update_mng_vlan(adapter);
 
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-	igb_reset_adaptive(&adapter->hw);
-	igb_get_phy_info(&adapter->hw);
+	igb_reset_adaptive(hw);
+	igb_get_phy_info(hw);
 }
 
 static const struct net_device_ops igb_netdev_ops = {
-	.ndo_open 		= igb_open,
+	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame_adv,
 	.ndo_get_stats		= igb_get_stats,
@@ -1211,10 +1389,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 	struct igb_adapter *adapter;
 	struct e1000_hw *hw;
+	u16 eeprom_data = 0;
+	static int global_quad_port_a; /* global quad port a indication */
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac;
-	u16 eeprom_data = 0;
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
 	u32 part_num;
 
@@ -1291,8 +1470,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_device_id = pdev->subsystem_device;
 
-	/* setup the private structure */
-	hw->back = adapter;
 	/* Copy the default MAC, PHY and NVM function pointers */
 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1302,46 +1479,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_sw_init;
 
-#ifdef CONFIG_PCI_IOV
-	/* since iov functionality isn't critical to base device function we
-	 * can accept failure.  If it fails we don't allow iov to be enabled */
-	if (hw->mac.type == e1000_82576) {
-		/* 82576 supports a maximum of 7 VFs in addition to the PF */
-		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
-		int i;
-		unsigned char mac_addr[ETH_ALEN];
-
-		if (num_vfs) {
-			adapter->vf_data = kcalloc(num_vfs,
-						sizeof(struct vf_data_storage),
-						GFP_KERNEL);
-			if (!adapter->vf_data) {
-				dev_err(&pdev->dev,
-				        "Could not allocate VF private data - "
-					"IOV enable failed\n");
-			} else {
-				err = pci_enable_sriov(pdev, num_vfs);
-				if (!err) {
-					adapter->vfs_allocated_count = num_vfs;
-					dev_info(&pdev->dev,
-					         "%d vfs allocated\n",
-					         num_vfs);
-					for (i = 0;
-					     i < adapter->vfs_allocated_count;
-					     i++) {
-						random_ether_addr(mac_addr);
-						igb_set_vf_mac(adapter, i,
-						               mac_addr);
-					}
-				} else {
-					kfree(adapter->vf_data);
-					adapter->vf_data = NULL;
-				}
-			}
-		}
-	}
-
-#endif
 	/* setup the private structure */
 	err = igb_sw_init(adapter);
 	if (err)
@@ -1349,16 +1486,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	igb_get_bus_info_pcie(hw);
 
-	/* set flags */
-	switch (hw->mac.type) {
-	case e1000_82575:
-		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-		break;
-	case e1000_82576:
-	default:
-		break;
-	}
-
 	hw->phy.autoneg_wait_to_complete = false;
 	hw->mac.adaptive_ifs = true;
 
@@ -1382,7 +1509,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_IPV6_CSUM;
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
-
 	netdev->features |= NETIF_F_GRO;
 
 	netdev->vlan_features |= NETIF_F_TSO;
@@ -1394,10 +1520,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	if (adapter->hw.mac.type == e1000_82576)
+	if (hw->mac.type >= e1000_82576)
 		netdev->features |= NETIF_F_SCTP_CSUM;
 
-	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
+	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
 
 	/* before reading the NVM, reset the controller to put the device in a
 	 * known good starting state */
@@ -1439,9 +1565,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	hw->fc.requested_mode = e1000_fc_default;
 	hw->fc.current_mode = e1000_fc_default;
 
-	adapter->itr_setting = IGB_DEFAULT_ITR;
-	adapter->itr = IGB_START_ITR;
-
 	igb_validate_mdi_setting(hw);
 
 	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1450,6 +1573,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	if (hw->bus.func == 0)
 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+	else if (hw->mac.type == e1000_82580)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+		                 &eeprom_data);
 	else if (hw->bus.func == 1)
 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 
@@ -1508,66 +1635,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		dev_info(&pdev->dev, "DCA enabled\n");
 		igb_setup_dca(adapter);
 	}
-#endif
-
-	/*
-	 * Initialize hardware timer: we keep it running just in case
-	 * that some program needs it later on.
-	 */
-	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-	adapter->cycles.read = igb_read_clock;
-	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-	adapter->cycles.mult = 1;
-	adapter->cycles.shift = IGB_TSYNC_SHIFT;
-	wr32(E1000_TIMINCA,
-	     (1<<24) |
-	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
-#if 0
-	/*
-	 * Avoid rollover while we initialize by resetting the time counter.
-	 */
-	wr32(E1000_SYSTIML, 0x00000000);
-	wr32(E1000_SYSTIMH, 0x00000000);
-#else
-	/*
-	 * Set registers so that rollover occurs soon to test this.
-	 */
-	wr32(E1000_SYSTIML, 0x00000000);
-	wr32(E1000_SYSTIMH, 0xFF800000);
-#endif
-	wrfl();
-	timecounter_init(&adapter->clock,
-			 &adapter->cycles,
-			 ktime_to_ns(ktime_get_real()));
 
-	/*
-	 * Synchronize our NIC clock against system wall clock. NIC
-	 * time stamp reading requires ~3us per sample, each sample
-	 * was pretty stable even under load => only require 10
-	 * samples for each offset comparison.
-	 */
-	memset(&adapter->compare, 0, sizeof(adapter->compare));
-	adapter->compare.source = &adapter->clock;
-	adapter->compare.target = ktime_get_real;
-	adapter->compare.num_samples = 10;
-	timecompare_update(&adapter->compare, 0);
-
-#ifdef DEBUG
-	{
-		char buffer[160];
-		printk(KERN_DEBUG
-			"igb: %s: hw %p initialized timer\n",
-			igb_get_time_str(adapter, buffer),
-			&adapter->hw);
-	}
 #endif
-
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 		 netdev->name,
-		 ((hw->bus.speed == e1000_bus_speed_2500)
-		  ? "2.5Gb/s" : "unknown"),
+		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		                                            "unknown"),
 		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
 		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
 		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1594,15 +1669,14 @@ err_eeprom:
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-
-	igb_free_queues(adapter);
 err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+	                             pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -1647,12 +1721,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 
-	if (!igb_check_reset_block(&adapter->hw))
-		igb_reset_phy(&adapter->hw);
-
-	igb_reset_interrupt_capability(adapter);
+	if (!igb_check_reset_block(hw))
+		igb_reset_phy(hw);
 
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PCI_IOV
 	/* reclaim resources allocated to VFs */
@@ -1668,11 +1740,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 		dev_info(&pdev->dev, "IOV Disabled\n");
 	}
 #endif
+
 	iounmap(hw->hw_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+	                             pci_select_bars(pdev, IORESOURCE_MEM));
 
 	free_netdev(netdev);
 
@@ -1682,6 +1755,160 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 }
 
 /**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs.  The reason for ordering it this way is that it is much
+ * more expensive, time-wise, to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
+{
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (adapter->vfs_allocated_count > 7)
+		adapter->vfs_allocated_count = 7;
+
+	if (adapter->vfs_allocated_count) {
+		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+		                           sizeof(struct vf_data_storage),
+		                           GFP_KERNEL);
+		/* if allocation failed then we do not support SR-IOV */
+		if (!adapter->vf_data) {
+			adapter->vfs_allocated_count = 0;
+			dev_err(&pdev->dev, "Unable to allocate memory for VF "
+			        "Data Storage\n");
+		}
+	}
+
+	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+		kfree(adapter->vf_data);
+		adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+		adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+	} else {
+		unsigned char mac_addr[ETH_ALEN];
+		int i;
+		dev_info(&pdev->dev, "%d vfs allocated\n",
+		         adapter->vfs_allocated_count);
+		for (i = 0; i < adapter->vfs_allocated_count; i++) {
+			random_ether_addr(mac_addr);
+			igb_set_vf_mac(adapter, i, mac_addr);
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+}
+
+
+/**
+ * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
+ * @adapter: board private structure to initialize
+ *
+ * igb_init_hw_timer initializes the function pointer and values for the hw
+ * timer found in hardware.
+ **/
+static void igb_init_hw_timer(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	switch (hw->mac.type) {
+	case e1000_82580:
+		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+		adapter->cycles.read = igb_read_clock;
+		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+		adapter->cycles.mult = 1;
+		/*
+		 * The 82580 timesync advances the system timer by 8ns every 8ns,
+		 * and the value cannot be shifted.  Instead we need to shift
+		 * the registers to generate a 64bit timer value.  As a result
+		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
+		 * 24 in order to generate a larger value for synchronization.
+		 */
+		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
+		/* disable system timer temporarily by setting bit 31 */
+		wr32(E1000_TSAUXC, 0x80000000);
+		wrfl();
+
+		/* Set registers so that rollover occurs soon to test this. */
+		wr32(E1000_SYSTIMR, 0x00000000);
+		wr32(E1000_SYSTIML, 0x80000000);
+		wr32(E1000_SYSTIMH, 0x000000FF);
+		wrfl();
+
+		/* enable system timer by clearing bit 31 */
+		wr32(E1000_TSAUXC, 0x0);
+		wrfl();
+
+		timecounter_init(&adapter->clock,
+				 &adapter->cycles,
+				 ktime_to_ns(ktime_get_real()));
+		/*
+		 * Synchronize our NIC clock against system wall clock. NIC
+		 * time stamp reading requires ~3us per sample, each sample
+		 * was pretty stable even under load => only require 10
+		 * samples for each offset comparison.
+		 */
+		memset(&adapter->compare, 0, sizeof(adapter->compare));
+		adapter->compare.source = &adapter->clock;
+		adapter->compare.target = ktime_get_real;
+		adapter->compare.num_samples = 10;
+		timecompare_update(&adapter->compare, 0);
+		break;
+	case e1000_82576:
+		/*
+		 * Initialize hardware timer: we keep it running just in case
+		 * that some program needs it later on.
+		 */
+		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+		adapter->cycles.read = igb_read_clock;
+		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+		adapter->cycles.mult = 1;
+		/**
+		 * Scale the NIC clock cycle by a large factor so that
+		 * relatively small clock corrections can be added or
+		 * subtracted at each clock tick. The drawbacks of a large
+		 * factor are a) that the clock register overflows more quickly
+		 * (not such a big deal) and b) that the increment per tick has
+		 * to fit into 24 bits.  As a result we need to use a shift of
+		 * 19 so we can fit a value of 16 into the TIMINCA register.
+		 */
+		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
+		wr32(E1000_TIMINCA,
+		                (1 << E1000_TIMINCA_16NS_SHIFT) |
+		                (16 << IGB_82576_TSYNC_SHIFT));
+
+		/* Set registers so that rollover occurs soon to test this. */
+		wr32(E1000_SYSTIML, 0x00000000);
+		wr32(E1000_SYSTIMH, 0xFF800000);
+		wrfl();
+
+		timecounter_init(&adapter->clock,
+				 &adapter->cycles,
+				 ktime_to_ns(ktime_get_real()));
+		/*
+		 * Synchronize our NIC clock against system wall clock. NIC
+		 * time stamp reading requires ~3us per sample, each sample
+		 * was pretty stable even under load => only require 10
+		 * samples for each offset comparison.
+		 */
+		memset(&adapter->compare, 0, sizeof(adapter->compare));
+		adapter->compare.source = &adapter->clock;
+		adapter->compare.target = ktime_get_real;
+		adapter->compare.num_samples = 10;
+		timecompare_update(&adapter->compare, 0);
+		break;
+	case e1000_82575:
+		/* 82575 does not support timesync */
+	default:
+		break;
+	}
+
+}
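
For reference, here is a minimal stand-alone sketch (not part of the patch) of the cycle-to-nanosecond conversion implied by the 82576 branch above. It assumes the generic clocksource convention ns = (cycles * mult) >> shift, the shift of 19 named in the comment, and the 16 ns increment period selected via E1000_TIMINCA_16NS_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define IGB_82576_TSYNC_SHIFT 19	/* shift named in the comment above */

/* Generic cyclecounter conversion: ns = (cycles * mult) >> shift. */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* Per-tick increment programmed into TIMINCA: 16 << 19 = 0x800000,
	 * which still fits in the 24-bit increment field. */
	uint64_t incr = 16ULL << IGB_82576_TSYNC_SHIFT;

	/* With mult = 1 and shift = 19, one 16 ns hardware tick converts
	 * back to exactly 16 ns of wall time. */
	printf("TIMINCA increment = 0x%llx, ns per tick = %llu\n",
	       (unsigned long long)incr,
	       (unsigned long long)cycles_to_ns(incr, 1, IGB_82576_TSYNC_SHIFT));
	return 0;
}
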
+
+/**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
  *
@@ -1699,20 +1926,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
-	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_hdr_size = 0; /* disable packet split */
+	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-	/* This call may decrease the number of queues depending on
-	 * interrupt mode. */
-	igb_set_interrupt_capability(adapter);
+#ifdef CONFIG_PCI_IOV
+	if (hw->mac.type == e1000_82576)
+		adapter->vfs_allocated_count = max_vfs;
 
-	if (igb_alloc_queues(adapter)) {
+#endif /* CONFIG_PCI_IOV */
+	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+
+	/*
+	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
+	 * then we should combine the queues into a queue pair in order to
+	 * conserve interrupts due to limited supply
+	 */
+	if ((adapter->rss_queues > 4) ||
+	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
+		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+
+	/* This call may decrease the number of queues */
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 
+	igb_init_hw_timer(adapter);
+	igb_probe_vfs(adapter);
+
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
@@ -1757,21 +2001,12 @@ static int igb_open(struct net_device *netdev)
 
 	/* e1000_power_up_phy(adapter); */
 
-	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-	if ((adapter->hw.mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
-		igb_update_mng_vlan(adapter);
-
 	/* before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
 	 * clean_rx handler before we do so.  */
 	igb_configure(adapter);
 
-	igb_vmm_control(adapter);
-	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
-	igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
 	err = igb_request_irq(adapter);
 	if (err)
 		goto err_req_irq;
@@ -1779,18 +2014,28 @@ static int igb_open(struct net_device *netdev)
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_enable(&q_vector->napi);
+	}
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
 
 	igb_irq_enable(adapter);
 
+	/* notify VFs that reset has been completed */
+	if (adapter->vfs_allocated_count) {
+		u32 reg_data = rd32(E1000_CTRL_EXT);
+		reg_data |= E1000_CTRL_EXT_PFRSTD;
+		wr32(E1000_CTRL_EXT, reg_data);
+	}
+
 	netif_tx_start_all_queues(netdev);
 
-	/* Fire a link status change interrupt to start the watchdog. */
-	wr32(E1000_ICS, E1000_ICS_LSC);
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
 
 	return 0;
 
@@ -1829,28 +2074,18 @@ static int igb_close(struct net_device *netdev)
 	igb_free_all_tx_resources(adapter);
 	igb_free_all_rx_resources(adapter);
 
-	/* kill manageability vlan ID if supported, but not if a vlan with
-	 * the same ID is registered on the host OS (let 8021q kill it) */
-	if ((adapter->hw.mng_cookie.status &
-			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	     !(adapter->vlgrp &&
-	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
-		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-
 	return 0;
 }
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-			   struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pdev = tx_ring->pdev;
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1863,20 +2098,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	tx_ring->desc = pci_alloc_consistent(pdev,
+	                                     tx_ring->size,
 					     &tx_ring->dma);
 
 	if (!tx_ring->desc)
 		goto err;
 
-	tx_ring->adapter = adapter;
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	return 0;
 
 err:
 	vfree(tx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev,
+	dev_err(&pdev->dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
 }
@@ -1890,13 +2125,13 @@ err:
  **/
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
-	int r_idx;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
 		if (err) {
-			dev_err(&adapter->pdev->dev,
+			dev_err(&pdev->dev,
 				"Allocation for Tx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
 				igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1904,57 +2139,24 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 		}
 	}
 
-	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
-		r_idx = i % adapter->num_tx_queues;
+	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
+		int r_idx = i % adapter->num_tx_queues;
 		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
 	}
 	return err;
 }
 
 /**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
  **/
-static void igb_configure_tx(struct igb_adapter *adapter)
+void igb_setup_tctl(struct igb_adapter *adapter)
 {
-	u64 tdba;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl;
-	u32 txdctl, txctrl;
-	int i, j;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		wr32(E1000_TDLEN(j),
-		     ring->count * sizeof(union e1000_adv_tx_desc));
-		tdba = ring->dma;
-		wr32(E1000_TDBAL(j),
-		     tdba & 0x00000000ffffffffULL);
-		wr32(E1000_TDBAH(j), tdba >> 32);
-
-		ring->head = E1000_TDH(j);
-		ring->tail = E1000_TDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-		txdctl = rd32(E1000_TXDCTL(j));
-		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		wr32(E1000_TXDCTL(j), txdctl);
-
-		/* Turn off Relaxed Ordering on head write-backs.  The
-		 * writebacks MUST be delivered in order or it will
-		 * completely screw up our bookeeping.
-		 */
-		txctrl = rd32(E1000_DCA_TXCTRL(j));
-		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		wr32(E1000_DCA_TXCTRL(j), txctrl);
-	}
 
-	/* disable queue 0 to prevent tail bump w/o re-configuration */
-	if (adapter->vfs_allocated_count)
-		wr32(E1000_TXDCTL(0), 0);
+	/* disable queue 0 which is enabled by default on 82575 and 82576 */
+	wr32(E1000_TXDCTL(0), 0);
 
 	/* Program the Transmit Control Register */
 	tctl = rd32(E1000_TCTL);
@@ -1964,9 +2166,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
 	igb_config_collision_dist(hw);
 
-	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
-
 	/* Enable transmits */
 	tctl |= E1000_TCTL_EN;
 
@@ -1974,16 +2173,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 }
 
 /**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txdctl;
+	u64 tdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+
+	/* disable the queue */
+	txdctl = rd32(E1000_TXDCTL(reg_idx));
+	wr32(E1000_TXDCTL(reg_idx),
+	                txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+	wrfl();
+	mdelay(10);
+
+	wr32(E1000_TDLEN(reg_idx),
+	                ring->count * sizeof(union e1000_adv_tx_desc));
+	wr32(E1000_TDBAL(reg_idx),
+	                tdba & 0x00000000ffffffffULL);
+	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
+	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+	writel(0, ring->head);
+	writel(0, ring->tail);
+
+	txdctl |= IGB_TX_PTHRESH;
+	txdctl |= IGB_TX_HTHRESH << 8;
+	txdctl |= IGB_TX_WTHRESH << 16;
+
+	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+	wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
  * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-			   struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pdev = rx_ring->pdev;
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2007,13 +2259,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	rx_ring->adapter = adapter;
-
 	return 0;
 
 err:
 	vfree(rx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+	rx_ring->buffer_info = NULL;
+	dev_err(&pdev->dev, "Unable to allocate memory for "
 		"the receive descriptor ring\n");
 	return -ENOMEM;
 }
@@ -2027,12 +2278,13 @@ err:
  **/
 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
 		if (err) {
-			dev_err(&adapter->pdev->dev,
+			dev_err(&pdev->dev,
 				"Allocation for Rx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
 				igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2044,15 +2296,122 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 }
 
 /**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 mrqc, rxcsum;
+	u32 j, num_rx_queues, shift = 0, shift2 = 0;
+	union e1000_reta {
+		u32 dword;
+		u8  bytes[4];
+	} reta;
+	static const u8 rsshash[40] = {
+		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+	/* Fill out hash function seeds */
+	for (j = 0; j < 10; j++) {
+		u32 rsskey = rsshash[(j * 4)];
+		rsskey |= rsshash[(j * 4) + 1] << 8;
+		rsskey |= rsshash[(j * 4) + 2] << 16;
+		rsskey |= rsshash[(j * 4) + 3] << 24;
+		array_wr32(E1000_RSSRK(0), j, rsskey);
+	}
+
+	num_rx_queues = adapter->rss_queues;
+
+	if (adapter->vfs_allocated_count) {
+		/* 82575 and 82576 support 2 RSS queues for VMDq */
+		switch (hw->mac.type) {
+		case e1000_82580:
+			num_rx_queues = 1;
+			shift = 0;
+			break;
+		case e1000_82576:
+			shift = 3;
+			num_rx_queues = 2;
+			break;
+		case e1000_82575:
+			shift = 2;
+			shift2 = 6;
+		default:
+			break;
+		}
+	} else {
+		if (hw->mac.type == e1000_82575)
+			shift = 6;
+	}
+
+	for (j = 0; j < (32 * 4); j++) {
+		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+		if (shift2)
+			reta.bytes[j & 3] |= num_rx_queues << shift2;
+		if ((j & 3) == 3)
+			wr32(E1000_RETA(j >> 2), reta.dword);
+	}
+
+	/*
+	 * Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(E1000_RXCSUM);
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+	if (adapter->hw.mac.type >= e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
+	/* If VMDq is enabled then we set the appropriate mode for that, else
+	 * we default to RSS so that an RSS hash is calculated per packet even
+	 * if we are only using one queue */
+	if (adapter->vfs_allocated_count) {
+		if (hw->mac.type > e1000_82575) {
+			/* Set the default pool for the PF's first queue */
+			u32 vtctl = rd32(E1000_VT_CTL);
+			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+				   E1000_VT_CTL_DISABLE_DEF_POOL);
+			vtctl |= adapter->vfs_allocated_count <<
+				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+			wr32(E1000_VT_CTL, vtctl);
+		}
+		if (adapter->rss_queues > 1)
+			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+		else
+			mrqc = E1000_MRQC_ENABLE_VMDQ;
+	} else {
+		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+	}
+	igb_vmm_control(adapter);
+
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+	wr32(E1000_MRQC, mrqc);
+}
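
For reference, a user-space sketch (not part of the patch) of how the RETA fill loop above packs four one-byte queue selectors into each 32-bit redirection register; the queue count and shift value here are assumptions matching the 82576/82580 no-VF case:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_rx_queues = 4;	/* e.g. adapter->rss_queues */
	unsigned int shift = 0;		/* 82576/82580 with no VFs allocated */
	union {
		uint32_t dword;
		uint8_t  bytes[4];
	} reta;
	unsigned int j;

	for (j = 0; j < 32 * 4; j++) {
		/* queue selector for redirection-table entry j */
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if ((j & 3) == 3)	/* four one-byte entries per register */
			printf("RETA[%2u] = 0x%08x\n", j >> 2,
			       (unsigned)reta.dword);
	}
	return 0;
}

The printed dword values reflect the byte order of the host running the sketch; the hardware only cares about the four per-entry bytes.
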
+
+/**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+void igb_setup_rctl(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
-	u32 srrctl = 0;
-	int i;
 
 	rctl = rd32(E1000_RCTL);
 
@@ -2069,75 +2428,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 */
 	rctl |= E1000_RCTL_SECRC;
 
-	/*
-	 * disable store bad packets and clear size bits.
-	 */
+	/* disable store bad packets and clear size bits. */
 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
 
-	/* enable LPE when to prevent packets larger than max_frame_size */
-		rctl |= E1000_RCTL_LPE;
-
-	/* Setup buffer sizes */
-	switch (adapter->rx_buffer_len) {
-	case IGB_RXBUFFER_256:
-		rctl |= E1000_RCTL_SZ_256;
-		break;
-	case IGB_RXBUFFER_512:
-		rctl |= E1000_RCTL_SZ_512;
-		break;
-	default:
-		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
-		         >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-		break;
-	}
+	/* enable LPE to prevent packets larger than max_frame_size */
+	rctl |= E1000_RCTL_LPE;
 
-	/* 82575 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers.  Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	if (adapter->netdev->mtu > ETH_DATA_LEN) {
-		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-		srrctl |= adapter->rx_ps_hdr_size <<
-			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-	} else {
-		adapter->rx_ps_hdr_size = 0;
-		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-	}
+	/* disable queue 0 to prevent tail write w/o re-config */
+	wr32(E1000_RXDCTL(0), 0);
 
 	/* Attention!!!  For SR-IOV PF driver operations you must enable
 	 * queue drop for all VF and PF queues to prevent head of line blocking
 	 * if an un-trusted VF does not provide descriptors to hardware.
 	 */
 	if (adapter->vfs_allocated_count) {
-		u32 vmolr;
-
 		/* set all queue drop enable bits */
 		wr32(E1000_QDE, ALL_QUEUES);
-		srrctl |= E1000_SRRCTL_DROP_EN;
+	}
 
-		/* disable queue 0 to prevent tail write w/o re-config */
-		wr32(E1000_RXDCTL(0), 0);
+	wr32(E1000_RCTL, rctl);
+}
 
-		vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
-		if (rctl & E1000_RCTL_LPE)
-			vmolr |= E1000_VMOLR_LPE;
-		if (adapter->num_rx_queues > 1)
-			vmolr |= E1000_VMOLR_RSSE;
-		wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
-	}
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                   int vfn)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		int j = adapter->rx_ring[i].reg_idx;
-		wr32(E1000_SRRCTL(j), srrctl);
-	}
+	/* if it isn't the PF, check whether the VF has vlans enabled and,
+	 * if so, increase the size to allow for vlan tags */
+	if (vfn < adapter->vfs_allocated_count &&
+	    adapter->vf_data[vfn].vlans_enabled)
+		size += VLAN_TAG_SIZE;
 
-	wr32(E1000_RCTL, rctl);
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr &= ~E1000_VMOLR_RLPML_MASK;
+	vmolr |= size | E1000_VMOLR_LPE;
+	wr32(E1000_VMOLR(vfn), vmolr);
+
+	return 0;
 }
 
 /**
@@ -2159,33 +2488,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
 	 * size and set the VMOLR RLPML to the size we need */
 	if (pf_id) {
 		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
-		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
+		max_frame_size = MAX_JUMBO_FRAME_SIZE;
 	}
 
 	wr32(E1000_RLPML, max_frame_size);
 }
 
+static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
+
+	/*
+	 * This register exists only on 82576 and newer, so on older hardware
+	 * we should exit and do nothing
+	 */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
+	         E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
+
+	/* clear all bits that might not be set */
+	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
+		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+	/*
+	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
+	 * multicast packets
+	 */
+	if (vfn <= adapter->vfs_allocated_count)
+		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */
+
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
 /**
- * igb_configure_vt_default_pool - Configure VT default pool
+ * igb_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
+ * @ring: receive ring to be configured
  *
- * Configure the default pool
+ * Configure the Rx unit of the MAC after a reset.
  **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u16 pf_id = adapter->vfs_allocated_count;
-	u32 vtctl;
+	u64 rdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+	u32 srrctl, rxdctl;
+
+	/* disable the queue */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	wr32(E1000_RXDCTL(reg_idx),
+	                rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+	/* Set DMA base address registers */
+	wr32(E1000_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+	wr32(E1000_RDLEN(reg_idx),
+	               ring->count * sizeof(union e1000_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
+	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+	writel(0, ring->head);
+	writel(0, ring->tail);
+
+	/* set descriptor configuration */
+	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
+		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+		srrctl |= IGB_RXBUFFER_16384 >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+		srrctl |= (PAGE_SIZE / 2) >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+	} else {
+		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+		         E1000_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	}
 
-	/* not in sr-iov mode - do nothing */
-	if (!pf_id)
-		return;
+	wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+	/* set filtering for VMDQ pools */
+	igb_set_vmolr(adapter, reg_idx & 0x7);
 
-	vtctl = rd32(E1000_VT_CTL);
-	vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-		   E1000_VT_CTL_DISABLE_DEF_POOL);
-	vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-	wr32(E1000_VT_CTL, vtctl);
+	/* enable receive descriptor fetching */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+	rxdctl &= 0xFFF00000;
+	rxdctl |= IGB_RX_PTHRESH;
+	rxdctl |= IGB_RX_HTHRESH << 8;
+	rxdctl |= IGB_RX_WTHRESH << 16;
+	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
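
For reference, a stand-alone sketch (not part of the patch) of the one-buffer SRRCTL size encoding used above; the shift value of 10 (packet buffer size expressed in 1 KB units) is an assumption based on the conventional e1000/igb register layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed encoding: the packet buffer size field is in 1 KB units, i.e. the
 * byte count is shifted right by E1000_SRRCTL_BSIZEPKT_SHIFT (10). */
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t rx_buffer_len = 2048;	/* one-buffer (non-split) mode */
	uint32_t bsizepkt = ALIGN_UP(rx_buffer_len, 1024) >>
			    E1000_SRRCTL_BSIZEPKT_SHIFT;

	/* 2048-byte buffers are two 1 KB units, so the field is 2 */
	printf("BSIZEPACKET = %u\n", (unsigned)bsizepkt);
	return 0;
}
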
 
 /**
@@ -2196,112 +2599,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
  **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-	u64 rdba;
-	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rxcsum;
-	u32 rxdctl;
 	int i;
 
-	/* disable receives while setting up the descriptors */
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wrfl();
-	mdelay(10);
+	/* set UTA to appropriate mode */
+	igb_set_uta(adapter);
 
-	if (adapter->itr_setting > 3)
-		wr32(E1000_ITR, adapter->itr);
+	/* set the correct pool for the PF default MAC address in entry 0 */
+	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
+	                 adapter->vfs_allocated_count);
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &adapter->rx_ring[i];
-		int j = ring->reg_idx;
-		rdba = ring->dma;
-		wr32(E1000_RDBAL(j),
-		     rdba & 0x00000000ffffffffULL);
-		wr32(E1000_RDBAH(j), rdba >> 32);
-		wr32(E1000_RDLEN(j),
-		     ring->count * sizeof(union e1000_adv_rx_desc));
-
-		ring->head = E1000_RDH(j);
-		ring->tail = E1000_RDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-
-		rxdctl = rd32(E1000_RXDCTL(j));
-		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-		rxdctl &= 0xFFF00000;
-		rxdctl |= IGB_RX_PTHRESH;
-		rxdctl |= IGB_RX_HTHRESH << 8;
-		rxdctl |= IGB_RX_WTHRESH << 16;
-		wr32(E1000_RXDCTL(j), rxdctl);
-	}
-
-	if (adapter->num_rx_queues > 1) {
-		u32 random[10];
-		u32 mrqc;
-		u32 j, shift;
-		union e1000_reta {
-			u32 dword;
-			u8  bytes[4];
-		} reta;
-
-		get_random_bytes(&random[0], 40);
-
-		if (hw->mac.type >= e1000_82576)
-			shift = 0;
-		else
-			shift = 6;
-		for (j = 0; j < (32 * 4); j++) {
-			reta.bytes[j & 3] =
-				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-			if ((j & 3) == 3)
-				writel(reta.dword,
-				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
-		}
-		if (adapter->vfs_allocated_count)
-			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-		else
-			mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-		/* Fill out hash function seeds */
-		for (j = 0; j < 10; j++)
-			array_wr32(E1000_RSSRK(0), j, random[j]);
-
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-			 E1000_MRQC_RSS_FIELD_IPV6_TCP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-			 E1000_MRQC_RSS_FIELD_IPV6_UDP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-		wr32(E1000_MRQC, mrqc);
-	} else if (adapter->vfs_allocated_count) {
-		/* Enable multi-queue for sr-iov */
-		wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-	}
-
-	/* Enable Receive Checksum Offload for TCP and UDP */
-	rxcsum = rd32(E1000_RXCSUM);
-	/* Disable raw packet checksumming */
-	rxcsum |= E1000_RXCSUM_PCSD;
-
-	if (adapter->hw.mac.type == e1000_82576)
-		/* Enable Receive Checksum Offload for SCTP */
-		rxcsum |= E1000_RXCSUM_CRCOFL;
-
-	/* Don't need to set TUOFL or IPOFL, they default to 1 */
-	wr32(E1000_RXCSUM, rxcsum);
-
-	/* Set the default pool for the PF's first queue */
-	igb_configure_vt_default_pool(adapter);
-
-	igb_rlpml_set(adapter);
-
-	/* Enable Receives */
-	wr32(E1000_RCTL, rctl);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -2312,14 +2622,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = tx_ring->adapter->pdev;
-
 	igb_clean_tx_ring(tx_ring);
 
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	pci_free_consistent(tx_ring->pdev, tx_ring->size,
+	                    tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2338,18 +2651,30 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 		igb_free_tx_resources(&adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
-					   struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
+				    struct igb_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(tx_ring->pdev,
+					buffer_info->dma,
+					buffer_info->length,
+					PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(tx_ring->pdev,
+					buffer_info->dma,
+					buffer_info->length,
+					PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
-	/* buffer_info must be completely set up in the transmit path */
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
 }
 
 /**
@@ -2358,7 +2683,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
 	struct igb_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
@@ -2369,21 +2693,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 
 	for (i = 0; i < tx_ring->count; i++) {
 		buffer_info = &tx_ring->buffer_info[i];
-		igb_unmap_and_free_tx_resource(adapter, buffer_info);
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
 	memset(tx_ring->buffer_info, 0, size);
 
 	/* Zero out the descriptor ring */
-
 	memset(tx_ring->desc, 0, tx_ring->size);
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-
-	writel(0, adapter->hw.hw_addr + tx_ring->head);
-	writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -2406,14 +2726,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = rx_ring->adapter->pdev;
-
 	igb_clean_rx_ring(rx_ring);
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	pci_free_consistent(rx_ring->pdev, rx_ring->size,
+	                    rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2438,26 +2761,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
 	struct igb_buffer *buffer_info;
-	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
 	unsigned int i;
 
 	if (!rx_ring->buffer_info)
 		return;
+
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			if (adapter->rx_ps_hdr_size)
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_ps_hdr_size,
-						 PCI_DMA_FROMDEVICE);
-			else
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(rx_ring->pdev,
+			                 buffer_info->dma,
+					 rx_ring->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -2465,14 +2783,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
+		if (buffer_info->page_dma) {
+			pci_unmap_page(rx_ring->pdev,
+			               buffer_info->page_dma,
+				       PAGE_SIZE / 2,
+				       PCI_DMA_FROMDEVICE);
+			buffer_info->page_dma = 0;
+		}
 		if (buffer_info->page) {
-			if (buffer_info->page_dma)
-				pci_unmap_page(pdev, buffer_info->page_dma,
-					       PAGE_SIZE / 2,
-					       PCI_DMA_FROMDEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
-			buffer_info->page_dma = 0;
 			buffer_info->page_offset = 0;
 		}
 	}
@@ -2485,9 +2805,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-
-	writel(0, adapter->hw.hw_addr + rx_ring->head);
-	writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -2521,61 +2838,90 @@ static int igb_set_mac(struct net_device *netdev, void *p)
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	igb_rar_set(hw, hw->mac.addr, 0);
-	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+	/* set the correct pool for the new PF MAC address in entry 0 */
+	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
+	                 adapter->vfs_allocated_count);
 
 	return 0;
 }
 
 /**
- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * igb_write_mc_addr_list - write multicast addresses to MTA
  * @netdev: network interface device structure
  *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
  **/
-static void igb_set_rx_mode(struct net_device *netdev)
+static int igb_write_mc_addr_list(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	unsigned int rar_entries = hw->mac.rar_entry_count -
-	                           (adapter->vfs_allocated_count + 1);
 	struct dev_mc_list *mc_ptr = netdev->mc_list;
-	u8  *mta_list = NULL;
-	u32 rctl;
+	u8  *mta_list;
+	u32 vmolr = 0;
 	int i;
 
-	/* Check for Promiscuous and All Multicast modes */
-	rctl = rd32(E1000_RCTL);
+	if (!netdev->mc_count) {
+		/* nothing to program, so clear mc list */
+		igb_update_mc_addr_list(hw, NULL, 0);
+		igb_restore_vf_multicasts(adapter);
+		return 0;
+	}
 
-	if (netdev->flags & IFF_PROMISC) {
-		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-		rctl &= ~E1000_RCTL_VFE;
-	} else {
-		if (netdev->flags & IFF_ALLMULTI)
-			rctl |= E1000_RCTL_MPE;
-		else
-			rctl &= ~E1000_RCTL_MPE;
+	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+	if (!mta_list)
+		return -ENOMEM;
 
-		if (netdev->uc.count > rar_entries)
-			rctl |= E1000_RCTL_UPE;
-		else
-			rctl &= ~E1000_RCTL_UPE;
-		rctl |= E1000_RCTL_VFE;
+	/* set vmolr receive overflow multicast bit */
+	vmolr |= E1000_VMOLR_ROMPE;
+
+	/* The shared function expects a packed array of only addresses. */
+	mc_ptr = netdev->mc_list;
+
+	for (i = 0; i < netdev->mc_count; i++) {
+		if (!mc_ptr)
+			break;
+		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+		mc_ptr = mc_ptr->next;
 	}
-	wr32(E1000_RCTL, rctl);
+	igb_update_mc_addr_list(hw, mta_list, i);
+	kfree(mta_list);
+
+	return netdev->mc_count;
+}
+
+/**
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = adapter->vfs_allocated_count;
+	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+	int count = 0;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev->uc.count > rar_entries)
+		return -ENOMEM;
 
 	if (netdev->uc.count && rar_entries) {
 		struct netdev_hw_addr *ha;
 		list_for_each_entry(ha, &netdev->uc.list, list) {
 			if (!rar_entries)
 				break;
-			igb_rar_set(hw, ha->addr, rar_entries);
-			igb_set_rah_pool(hw, adapter->vfs_allocated_count,
-			                 rar_entries);
-			rar_entries--;
+			igb_rar_set_qsel(adapter, ha->addr,
+			                 rar_entries--,
+			                 vfn);
+			count++;
 		}
 	}
 	/* write the addresses in reverse order to avoid write combining */
@@ -2585,29 +2931,79 @@ static void igb_set_rx_mode(struct net_device *netdev)
 	}
 	wrfl();
 
-	if (!netdev->mc_count) {
-		/* nothing to program, so clear mc list */
-		igb_update_mc_addr_list(hw, NULL, 0);
-		igb_restore_vf_multicasts(adapter);
-		return;
+	return count;
+}
+
+/**
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = adapter->vfs_allocated_count;
+	u32 rctl, vmolr = 0;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = rd32(E1000_RCTL);
+
+	/* clear the affected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/*
+			 * Write addresses to the MTA; if the attempt fails,
+			 * just turn on promiscuous mode so that we can at
+			 * least receive multicast traffic
+			 */
+			count = igb_write_mc_addr_list(netdev);
+			if (count < 0) {
+				rctl |= E1000_RCTL_MPE;
+				vmolr |= E1000_VMOLR_MPME;
+			} else if (count) {
+				vmolr |= E1000_VMOLR_ROMPE;
+			}
+		}
+		/*
+		 * Write addresses to available RAR registers; if there is not
+		 * sufficient space to store all the addresses, enable
+		 * unicast promiscuous mode
+		 */
+		count = igb_write_uc_addr_list(netdev);
+		if (count < 0) {
+			rctl |= E1000_RCTL_UPE;
+			vmolr |= E1000_VMOLR_ROPE;
+		}
+		rctl |= E1000_RCTL_VFE;
 	}
+	wr32(E1000_RCTL, rctl);
 
-	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
-	if (!mta_list) {
-		dev_err(&adapter->pdev->dev,
-		        "failed to allocate multicast filter list\n");
+	/*
+	 * In order to support SR-IOV and eventually VMDq it is necessary to set
+	 * the VMOLR to enable the appropriate modes.  Without this workaround
+	 * we will have issues with VLAN tag stripping not being done for frames
+	 * that are only arriving because we are the default pool
+	 */
+	if (hw->mac.type < e1000_82576)
 		return;
-	}
 
-	/* The shared function expects a packed array of only addresses. */
-	for (i = 0; i < netdev->mc_count; i++) {
-		if (!mc_ptr)
-			break;
-		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
-		mc_ptr = mc_ptr->next;
-	}
-	igb_update_mc_addr_list(hw, mta_list, i);
-	kfree(mta_list);
+	vmolr |= rd32(E1000_VMOLR(vfn)) &
+	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+	wr32(E1000_VMOLR(vfn), vmolr);
 	igb_restore_vf_multicasts(adapter);
 }
 
@@ -2669,37 +3065,33 @@ static void igb_watchdog(unsigned long data)
 static void igb_watchdog_task(struct work_struct *work)
 {
 	struct igb_adapter *adapter = container_of(work,
-					struct igb_adapter, watchdog_task);
+	                                           struct igb_adapter,
+                                                   watchdog_task);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	struct igb_ring *tx_ring = adapter->tx_ring;
 	u32 link;
-	u32 eics = 0;
 	int i;
 
 	link = igb_has_link(adapter);
-	if ((netif_carrier_ok(netdev)) && link)
-		goto link_up;
-
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
-			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
-						   &adapter->link_speed,
-						   &adapter->link_duplex);
+			hw->mac.ops.get_speed_and_duplex(hw,
+			                                 &adapter->link_speed,
+			                                 &adapter->link_duplex);
 
 			ctrl = rd32(E1000_CTRL);
 			/* Links status message must follow this format */
 			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
 				 "Flow Control: %s\n",
-			         netdev->name,
-				 adapter->link_speed,
-				 adapter->link_duplex == FULL_DUPLEX ?
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
 				 "Full Duplex" : "Half Duplex",
-				 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
-				 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
-				 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
-				 E1000_CTRL_TFCE) ? "TX" : "None")));
+			       ((ctrl & E1000_CTRL_TFCE) &&
+			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
+			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
+			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
 			/* tweak tx_queue_len according to speed/duplex and
 			 * adjust the timeout factor */
@@ -2743,46 +3135,40 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 	}
 
-link_up:
 	igb_update_stats(adapter);
+	igb_update_adaptive(hw);
 
-	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
-	adapter->tpt_old = adapter->stats.tpt;
-	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
-	adapter->colc_old = adapter->stats.colc;
-
-	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
-	adapter->gorc_old = adapter->stats.gorc;
-	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
-	adapter->gotc_old = adapter->stats.gotc;
-
-	igb_update_adaptive(&adapter->hw);
-
-	if (!netif_carrier_ok(netdev)) {
-		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *tx_ring = &adapter->tx_ring[i];
+		if (!netif_carrier_ok(netdev)) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
 			 * (Do the reset outside of interrupt context). */
-			adapter->tx_timeout_count++;
-			schedule_work(&adapter->reset_task);
-			/* return immediately since reset is imminent */
-			return;
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
 		}
+
+		/* Force detection of hung controller every watchdog period */
+		tx_ring->detect_tx_hung = true;
 	}
 
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	if (adapter->msix_entries) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			eics |= adapter->rx_ring[i].eims_value;
+		u32 eics = 0;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			eics |= q_vector->eims_value;
+		}
 		wr32(E1000_EICS, eics);
 	} else {
 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
 	}
 
-	/* Force detection of hung controller every watchdog period */
-	tx_ring->detect_tx_hung = true;
-
 	/* Reset the timer */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer,
@@ -2796,7 +3182,6 @@ enum latency_range {
 	latency_invalid = 255
 };
 
-
 /**
  * igb_update_ring_itr - update the dynamic ITR value based on packet size
  *
@@ -2811,25 +3196,37 @@ enum latency_range {
  *      parameter (see igb_param.c)
  *      NOTE:  This function is called only when operating in a multiqueue
  *             receive environment.
- * @rx_ring: pointer to ring
+ * @q_vector: pointer to q_vector
  **/
-static void igb_update_ring_itr(struct igb_ring *rx_ring)
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 {
-	int new_val = rx_ring->itr_val;
+	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
-	struct igb_adapter *adapter = rx_ring->adapter;
-
-	if (!rx_ring->total_packets)
-		goto clear_counts; /* no packets, so don't do anything */
+	struct igb_adapter *adapter = q_vector->adapter;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
 	 */
 	if (adapter->link_speed != SPEED_1000) {
-		new_val = 120;
+		new_val = 976;
 		goto set_itr_val;
 	}
-	avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
+
+	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->rx_ring;
+		avg_wire_size = ring->total_bytes / ring->total_packets;
+	}
+
+	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->tx_ring;
+		avg_wire_size = max_t(u32, avg_wire_size,
+		                      (ring->total_bytes /
+		                       ring->total_packets));
+	}
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
 
 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
 	avg_wire_size += 24;
@@ -2844,13 +3241,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
 		new_val = avg_wire_size / 2;
 
 set_itr_val:
-	if (new_val != rx_ring->itr_val) {
-		rx_ring->itr_val = new_val;
-		rx_ring->set_itr = 1;
+	if (new_val != q_vector->itr_val) {
+		q_vector->itr_val = new_val;
+		q_vector->set_itr = 1;
 	}
 clear_counts:
-	rx_ring->total_bytes = 0;
-	rx_ring->total_packets = 0;
+	if (q_vector->rx_ring) {
+		q_vector->rx_ring->total_bytes = 0;
+		q_vector->rx_ring->total_packets = 0;
+	}
+	if (q_vector->tx_ring) {
+		q_vector->tx_ring->total_bytes = 0;
+		q_vector->tx_ring->total_packets = 0;
+	}
 }
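
For reference, a stand-alone sketch (not part of the patch) of the averaging step that the reworked igb_update_ring_itr above now performs across the Rx and Tx rings of a q_vector:

#include <stdint.h>
#include <stdio.h>

/* Combine the per-ring byte/packet counters the way the function above
 * does: take the larger of the Rx and Tx average packet sizes, then add
 * 24 bytes of per-frame overhead (CRC, preamble, inter-frame gap).
 * Returns 0 when no packets were handled this interval. */
static uint32_t avg_wire_size(uint64_t rx_bytes, uint64_t rx_pkts,
			      uint64_t tx_bytes, uint64_t tx_pkts)
{
	uint64_t avg = 0;

	if (rx_pkts)
		avg = rx_bytes / rx_pkts;
	if (tx_pkts && tx_bytes / tx_pkts > avg)
		avg = tx_bytes / tx_pkts;
	if (!avg)
		return 0;

	return (uint32_t)(avg + 24);
}

int main(void)
{
	/* 100 full-size frames received, 100 ACK-sized frames sent */
	printf("avg wire size = %u\n",
	       (unsigned)avg_wire_size(1514ULL * 100, 100, 64ULL * 100, 100));
	return 0;
}
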
 
 /**
@@ -2867,7 +3270,7 @@ clear_counts:
  *      NOTE:  These calculations are only valid when operating in a single-
  *             queue environment.
  * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
+ * @itr_setting: current q_vector->itr_val
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  **/
@@ -2919,8 +3322,9 @@ update_itr_done:
 
 static void igb_set_itr(struct igb_adapter *adapter)
 {
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	u16 current_itr;
-	u32 new_itr = adapter->itr;
+	u32 new_itr = q_vector->itr_val;
 
 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
 	if (adapter->link_speed != SPEED_1000) {
@@ -2934,18 +3338,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
 				    adapter->rx_ring->total_packets,
 				    adapter->rx_ring->total_bytes);
 
-	if (adapter->rx_ring->buddy) {
-		adapter->tx_itr = igb_update_itr(adapter,
-					    adapter->tx_itr,
-					    adapter->tx_ring->total_packets,
-					    adapter->tx_ring->total_bytes);
-		current_itr = max(adapter->rx_itr, adapter->tx_itr);
-	} else {
-		current_itr = adapter->rx_itr;
-	}
+	adapter->tx_itr = igb_update_itr(adapter,
+				    adapter->tx_itr,
+				    adapter->tx_ring->total_packets,
+				    adapter->tx_ring->total_bytes);
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
-	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
+	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
 		current_itr = low_latency;
 
 	switch (current_itr) {
@@ -2966,18 +3366,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
 set_itr_now:
 	adapter->rx_ring->total_bytes = 0;
 	adapter->rx_ring->total_packets = 0;
-	if (adapter->rx_ring->buddy) {
-		adapter->rx_ring->buddy->total_bytes = 0;
-		adapter->rx_ring->buddy->total_packets = 0;
-	}
+	adapter->tx_ring->total_bytes = 0;
+	adapter->tx_ring->total_packets = 0;
 
-	if (new_itr != adapter->itr) {
+	if (new_itr != q_vector->itr_val) {
 		/* this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
 		 * increasing */
-		new_itr = new_itr > adapter->itr ?
-			     max((new_itr * adapter->itr) /
-			         (new_itr + (adapter->itr >> 2)), new_itr) :
+		new_itr = new_itr > q_vector->itr_val ?
+		             max((new_itr * q_vector->itr_val) /
+		                 (new_itr + (q_vector->itr_val >> 2)),
+		                 new_itr) :
 			     new_itr;
 		/* Don't write the value here; it resets the adapter's
 		 * internal timer, and causes us to delay far longer than
@@ -2985,25 +3384,22 @@ set_itr_now:
 		 * value at the beginning of the next interrupt so the timing
 		 * ends up being correct.
 		 */
-		adapter->itr = new_itr;
-		adapter->rx_ring->itr_val = new_itr;
-		adapter->rx_ring->set_itr = 1;
+		q_vector->itr_val = new_itr;
+		q_vector->set_itr = 1;
 	}
 
 	return;
 }
 
-
 #define IGB_TX_FLAGS_CSUM		0x00000001
 #define IGB_TX_FLAGS_VLAN		0x00000002
 #define IGB_TX_FLAGS_TSO		0x00000004
 #define IGB_TX_FLAGS_IPV4		0x00000008
-#define IGB_TX_FLAGS_TSTAMP             0x00000010
-#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT	16
+#define IGB_TX_FLAGS_TSTAMP		0x00000010
+#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT		16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-			      struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
 			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
@@ -3065,8 +3461,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
 	/* For 82575, context index must be unique per ring. */
-	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-		mss_l4len_idx |= tx_ring->queue_index << 4;
+	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 	context_desc->seqnum_seed = 0;
@@ -3083,14 +3479,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-					struct igb_ring *tx_ring,
-					struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+				   struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
-	unsigned int i;
+	struct pci_dev *pdev = tx_ring->pdev;
 	struct igb_buffer *buffer_info;
 	u32 info = 0, tu_cmd = 0;
+	unsigned int i;
 
 	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
 	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3100,6 +3496,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		if (tx_flags & IGB_TX_FLAGS_VLAN)
 			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+
 		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			info |= skb_network_header_len(skb);
@@ -3137,7 +3534,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 				break;
 			default:
 				if (unlikely(net_ratelimit()))
-					dev_warn(&adapter->pdev->dev,
+					dev_warn(&pdev->dev,
 					    "partial checksum but proto=%x!\n",
 					    skb->protocol);
 				break;
@@ -3146,11 +3543,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
 		context_desc->seqnum_seed = 0;
-		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
 			context_desc->mss_l4len_idx =
-				cpu_to_le32(tx_ring->queue_index << 4);
-		else
-			context_desc->mss_l4len_idx = 0;
+				cpu_to_le32(tx_ring->reg_idx << 4);
 
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
@@ -3169,32 +3564,27 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-				 struct igb_ring *tx_ring, struct sk_buff *skb,
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 				 unsigned int first)
 {
 	struct igb_buffer *buffer_info;
+	struct pci_dev *pdev = tx_ring->pdev;
 	unsigned int len = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-
 	buffer_info = &tx_ring->buffer_info[i];
 	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 	buffer_info->length = len;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = skb_shinfo(skb)->dma_head;
+	buffer_info->dma = pci_map_single(pdev, skb->data, len,
+					  PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		goto dma_error;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
@@ -3211,25 +3601,55 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = map[count];
+		buffer_info->mapped_as_page = true;
+		buffer_info->dma = pci_map_page(pdev,
+						frag->page,
+						frag->page_offset,
+						len,
+						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
+
 		count++;
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count + 1;
+	return ++count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed buffer_info mapping */
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+	}
+
+	return 0;
 }
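
The dma_error path above follows a map-then-unwind pattern: remember how many segments were mapped, and on failure walk backwards releasing them before returning 0 so the caller can drop the skb and rewind next_to_use. A self-contained sketch of that pattern, using toy helpers rather than the kernel DMA APIs:

#include <stdbool.h>
#include <stddef.h>

#define MAX_SEGS 16
static bool seg_mapped[MAX_SEGS];

/* toy stand-ins for pci_map_single()/pci_map_page(); a real mapping can fail */
static bool map_seg(size_t idx)
{
	seg_mapped[idx] = true;
	return true;
}

static void unmap_seg(size_t idx)
{
	seg_mapped[idx] = false;
}

static size_t map_all(size_t nsegs)
{
	size_t mapped;

	for (mapped = 0; mapped < nsegs && mapped < MAX_SEGS; mapped++) {
		if (!map_seg(mapped))
			goto unwind;
	}
	return mapped;			/* number of descriptors mapped */

unwind:
	while (mapped--)		/* release already-mapped segments in reverse */
		unmap_seg(mapped);
	return 0;			/* 0 tells the caller to drop the packet */
}
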
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-				    struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 				    int tx_flags, int count, u32 paylen,
 				    u8 hdr_len)
 {
-	union e1000_adv_tx_desc *tx_desc = NULL;
+	union e1000_adv_tx_desc *tx_desc;
 	struct igb_buffer *buffer_info;
 	u32 olinfo_status = 0, cmd_type_len;
-	unsigned int i;
+	unsigned int i = tx_ring->next_to_use;
 
 	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
 			E1000_ADVTXD_DCMD_DEXT);
@@ -3254,27 +3674,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
 	}
 
-	if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-	    (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+	    (tx_flags & (IGB_TX_FLAGS_CSUM |
+	                 IGB_TX_FLAGS_TSO |
 			 IGB_TX_FLAGS_VLAN)))
-		olinfo_status |= tx_ring->queue_index << 4;
+		olinfo_status |= tx_ring->reg_idx << 4;
 
 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
-	i = tx_ring->next_to_use;
-	while (count--) {
+	do {
 		buffer_info = &tx_ring->buffer_info[i];
 		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | buffer_info->length);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		count--;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
-	}
+	} while (count > 0);
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -3282,16 +3703,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+	writel(i, tx_ring->tail);
 	/* we need this if more than one processor can write to our tail
 	 * at a time; it synchronizes IO on IA64/Altix systems */
 	mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-			       struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct net_device *netdev = tx_ring->netdev;
 
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
@@ -3307,66 +3727,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	tx_ring->tx_stats.restart_queue++;
 	return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-			     struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
 	if (igb_desc_unused(tx_ring) >= size)
 		return 0;
-	return __igb_maybe_stop_tx(netdev, tx_ring, size);
+	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-					   struct net_device *netdev,
-					   struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
+				    struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
-	int count = 0;
-	int tso = 0;
-	union skb_shared_tx *shtx;
-
-	if (test_bit(__IGB_DOWN, &adapter->state)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	if (skb->len <= 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	int tso = 0, count;
+	union skb_shared_tx *shtx = skb_tx(skb);
 
 	/* need: 1 descriptor per page,
 	 *       + 2 desc gap to keep tail from touching head,
 	 *       + 1 desc for skb->data,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time */
-	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
 
-	/*
-	 * TODO: check that there currently is no other packet with
-	 * time stamping in the queue
-	 *
-	 * When doing time stamping, keep the connection to the socket
-	 * a while longer: it is still needed by skb_hwtstamp_tx(),
-	 * called either in igb_tx_hwtstamp() or by our caller when
-	 * doing software time stamping.
-	 */
-	shtx = skb_tx(skb);
 	if (unlikely(shtx->hardware)) {
 		shtx->in_progress = 1;
 		tx_flags |= IGB_TX_FLAGS_TSTAMP;
 	}
 
-	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
@@ -3375,37 +3772,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
 	first = tx_ring->next_to_use;
-	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-					      &hdr_len) : 0;
+	if (skb_is_gso(skb)) {
+		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
 
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
+		if (tso < 0) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
 	}
 
 	if (tso)
 		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
 	         (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IGB_TX_FLAGS_CSUM;
 
 	/*
-	 * count reflects descriptors mapped, if 0 then mapping error
+	 * count reflects descriptors mapped; if 0 or less, then a mapping error
 	 * has occurred and we need to rewind the descriptor queue
 	 */
-	count = igb_tx_map_adv(adapter, tx_ring, skb, first);
-
-	if (count) {
-		igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-			         skb->len, hdr_len);
-		/* Make sure there is space in the ring for the next send. */
-		igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
-	} else {
+	count = igb_tx_map_adv(tx_ring, skb, first);
+	if (!count) {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
+		return NETDEV_TX_OK;
 	}
 
+	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+
 	return NETDEV_TX_OK;
 }
 
@@ -3414,8 +3812,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *tx_ring;
-
 	int r_idx = 0;
+
+	if (test_bit(__IGB_DOWN, &adapter->state)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
 	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
 	tx_ring = adapter->multi_tx_table[r_idx];
 
@@ -3423,7 +3831,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 	 * to a flow.  Right now, performance is impacted slightly negatively
 	 * if using multiple tx queues.  If the stack breaks away from a
 	 * single qdisc implementation, we can look at this again. */
-	return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+	return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -3437,6 +3845,10 @@ static void igb_tx_timeout(struct net_device *netdev)
 
 	/* Do the reset outside of interrupt context */
 	adapter->tx_timeout_count++;
+
+	if (hw->mac.type == e1000_82580)
+		hw->dev_spec._82575.global_device_reset = true;
+
 	schedule_work(&adapter->reset_task);
 	wr32(E1000_EICS,
 	     (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3459,10 +3871,8 @@ static void igb_reset_task(struct work_struct *work)
  **/
 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
-
 	/* only return the current stats */
-	return &adapter->net_stats;
+	return &netdev->stats;
 }
 
 /**
@@ -3475,16 +3885,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 rx_buffer_len, i;
 
-	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		dev_err(&pdev->dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
+		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
 		return -EINVAL;
 	}
 
@@ -3493,8 +3904,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;
-	if (netif_running(netdev))
-		igb_down(adapter);
 
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3502,35 +3911,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	 * i.e. RXBUFFER_2048 --> size-4096 slab
 	 */
 
-	if (max_frame <= IGB_RXBUFFER_256)
-		adapter->rx_buffer_len = IGB_RXBUFFER_256;
-	else if (max_frame <= IGB_RXBUFFER_512)
-		adapter->rx_buffer_len = IGB_RXBUFFER_512;
-	else if (max_frame <= IGB_RXBUFFER_1024)
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-	else if (max_frame <= IGB_RXBUFFER_2048)
-		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+	if (max_frame <= IGB_RXBUFFER_1024)
+		rx_buffer_len = IGB_RXBUFFER_1024;
+	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-		adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-		adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
+		rx_buffer_len = IGB_RXBUFFER_128;
 
-	/* if sr-iov is enabled we need to force buffer size to 1K or larger */
-	if (adapter->vfs_allocated_count &&
-	    (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-
-	/* adjust allocation if LPE protects us, and we aren't using SBP */
-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
-		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	if (netif_running(netdev))
+		igb_down(adapter);
 
-	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
+	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
 		 netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
 	if (netif_running(netdev))
 		igb_up(adapter);
 	else
@@ -3548,9 +3945,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 void igb_update_stats(struct igb_adapter *adapter)
 {
+	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
+	u32 rnbc;
 	u16 phy_tmp;
+	int i;
+	u64 bytes, packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -3563,6 +3964,29 @@ void igb_update_stats(struct igb_adapter *adapter)
 	if (pci_channel_offline(pdev))
 		return;
 
+	bytes = 0;
+	packets = 0;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+		adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+		net_stats->rx_fifo_errors += rqdpc_tmp;
+		bytes += adapter->rx_ring[i].rx_stats.bytes;
+		packets += adapter->rx_ring[i].rx_stats.packets;
+	}
+
+	net_stats->rx_bytes = bytes;
+	net_stats->rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		bytes += adapter->tx_ring[i].tx_stats.bytes;
+		packets += adapter->tx_ring[i].tx_stats.packets;
+	}
+	net_stats->tx_bytes = bytes;
+	net_stats->tx_packets = packets;
+
+	/* read stats registers */
 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
 	adapter->stats.gprc += rd32(E1000_GPRC);
 	adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3595,7 +4019,9 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.gptc += rd32(E1000_GPTC);
 	adapter->stats.gotc += rd32(E1000_GOTCL);
 	rd32(E1000_GOTCH); /* clear GOTCL */
-	adapter->stats.rnbc += rd32(E1000_RNBC);
+	rnbc = rd32(E1000_RNBC);
+	adapter->stats.rnbc += rnbc;
+	net_stats->rx_fifo_errors += rnbc;
 	adapter->stats.ruc += rd32(E1000_RUC);
 	adapter->stats.rfc += rd32(E1000_RFC);
 	adapter->stats.rjc += rd32(E1000_RJC);
@@ -3614,7 +4040,6 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.bptc += rd32(E1000_BPTC);
 
 	/* used for adaptive IFS */
-
 	hw->mac.tx_packet_delta = rd32(E1000_TPT);
 	adapter->stats.tpt += hw->mac.tx_packet_delta;
 	hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3637,56 +4062,29 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
 
 	/* Fill out the OS statistics structure */
-	adapter->net_stats.multicast = adapter->stats.mprc;
-	adapter->net_stats.collisions = adapter->stats.colc;
+	net_stats->multicast = adapter->stats.mprc;
+	net_stats->collisions = adapter->stats.colc;
 
 	/* Rx Errors */
 
-	if (hw->mac.type != e1000_82575) {
-		u32 rqdpc_tmp;
-		u64 rqdpc_total = 0;
-		int i;
-		/* Read out drops stats per RX queue.  Notice RQDPC (Receive
-		 * Queue Drop Packet Count) stats only gets incremented, if
-		 * the DROP_EN but it set (in the SRRCTL register for that
-		 * queue).  If DROP_EN bit is NOT set, then the some what
-		 * equivalent count is stored in RNBC (not per queue basis).
-		 * Also note the drop count is due to lack of available
-		 * descriptors.
-		 */
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
-			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
-			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
-		}
-		adapter->net_stats.rx_fifo_errors = rqdpc_total;
-	}
-
-	/* Note RNBC (Receive No Buffers Count) is an not an exact
-	 * drop count as the hardware FIFO might save the day.  Thats
-	 * one of the reason for saving it in rx_fifo_errors, as its
-	 * potentially not a true drop.
-	 */
-	adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
-
 	/* RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC */
-	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+	net_stats->rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
 		adapter->stats.cexterr;
-	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
-					      adapter->stats.roc;
-	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+	net_stats->rx_length_errors = adapter->stats.ruc +
+				      adapter->stats.roc;
+	net_stats->rx_crc_errors = adapter->stats.crcerrs;
+	net_stats->rx_frame_errors = adapter->stats.algnerrc;
+	net_stats->rx_missed_errors = adapter->stats.mpc;
 
 	/* Tx Errors */
-	adapter->net_stats.tx_errors = adapter->stats.ecol +
-				       adapter->stats.latecol;
-	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	net_stats->tx_errors = adapter->stats.ecol +
+			       adapter->stats.latecol;
+	net_stats->tx_aborted_errors = adapter->stats.ecol;
+	net_stats->tx_window_errors = adapter->stats.latecol;
+	net_stats->tx_carrier_errors = adapter->stats.tncrs;
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
@@ -3707,14 +4105,12 @@ void igb_update_stats(struct igb_adapter *adapter)
 
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = rd32(E1000_ICR);
-
 	/* reading ICR causes bit 31 of EICR to be cleared */
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -3730,125 +4126,90 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
+	if (adapter->vfs_allocated_count)
+		wr32(E1000_IMS, E1000_IMS_LSC |
+				E1000_IMS_VMMB |
+				E1000_IMS_DOUTSYNC);
+	else
+		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
 	wr32(E1000_EIMS, adapter->eims_other);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t igb_msix_tx(int irq, void *data)
+static void igb_write_itr(struct igb_q_vector *q_vector)
 {
-	struct igb_ring *tx_ring = data;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
+	u32 itr_val = q_vector->itr_val & 0x7FFC;
 
-#ifdef CONFIG_IGB_DCA
-	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_tx_dca(tx_ring);
-#endif
+	if (!q_vector->set_itr)
+		return;
 
-	tx_ring->total_bytes = 0;
-	tx_ring->total_packets = 0;
+	if (!itr_val)
+		itr_val = 0x4;
 
-	/* auto mask will automatically reenable the interrupt when we write
-	 * EICS */
-	if (!igb_clean_tx_irq(tx_ring))
-		/* Ring was not completely cleaned, so fire another interrupt */
-		wr32(E1000_EICS, tx_ring->eims_value);
+	if (q_vector->itr_shift)
+		itr_val |= itr_val << q_vector->itr_shift;
 	else
-		wr32(E1000_EIMS, tx_ring->eims_value);
+		itr_val |= 0x80000000;
 
-	return IRQ_HANDLED;
-}
-
-static void igb_write_itr(struct igb_ring *ring)
-{
-	struct e1000_hw *hw = &ring->adapter->hw;
-	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
-		switch (hw->mac.type) {
-		case e1000_82576:
-			wr32(ring->itr_register, ring->itr_val |
-			     0x80000000);
-			break;
-		default:
-			wr32(ring->itr_register, ring->itr_val |
-			     (ring->itr_val << 16));
-			break;
-		}
-		ring->set_itr = 0;
-	}
+	writel(itr_val, q_vector->itr_register);
+	q_vector->set_itr = 0;
 }
 
-static irqreturn_t igb_msix_rx(int irq, void *data)
+static irqreturn_t igb_msix_ring(int irq, void *data)
 {
-	struct igb_ring *rx_ring = data;
-
-	/* Write the ITR value calculated at the end of the
-	 * previous interrupt.
-	 */
+	struct igb_q_vector *q_vector = data;
 
-	igb_write_itr(rx_ring);
+	/* Write the ITR value calculated from the previous interrupt. */
+	igb_write_itr(q_vector);
 
-	if (napi_schedule_prep(&rx_ring->napi))
-		__napi_schedule(&rx_ring->napi);
+	napi_schedule(&q_vector->napi);
 
-#ifdef CONFIG_IGB_DCA
-	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_rx_dca(rx_ring);
-#endif
-		return IRQ_HANDLED;
+	return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *rx_ring)
+static void igb_update_dca(struct igb_q_vector *q_vector)
 {
-	u32 dca_rxctrl;
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = rx_ring->reg_idx;
 
-	if (rx_ring->cpu != cpu) {
-		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-		if (hw->mac.type == e1000_82576) {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_RXCTRL_CPUID_SHIFT;
+	if (q_vector->cpu == cpu)
+		goto out_no_update;
+
+	if (q_vector->tx_ring) {
+		int q = q_vector->tx_ring->reg_idx;
+		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 		} else {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			              E1000_DCA_TXCTRL_CPUID_SHIFT;
+		}
+		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+	}
+	if (q_vector->rx_ring) {
+		int q = q_vector->rx_ring->reg_idx;
+		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
 			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
 			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		} else {
+			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			              E1000_DCA_RXCTRL_CPUID_SHIFT;
 		}
 		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
 		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
 		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
 		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
-		rx_ring->cpu = cpu;
-	}
-	put_cpu();
-}
-
-static void igb_update_tx_dca(struct igb_ring *tx_ring)
-{
-	u32 dca_txctrl;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
-	int cpu = get_cpu();
-	int q = tx_ring->reg_idx;
-
-	if (tx_ring->cpu != cpu) {
-		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-		if (hw->mac.type == e1000_82576) {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_TXCTRL_CPUID_SHIFT;
-		} else {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		}
-		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
-		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
-		tx_ring->cpu = cpu;
 	}
+	q_vector->cpu = cpu;
+out_no_update:
 	put_cpu();
 }
 
@@ -3863,13 +4224,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
 	/* Always use CB2 mode, difference is masked in the CB driver. */
 	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].cpu = -1;
-		igb_update_tx_dca(&adapter->tx_ring[i]);
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].cpu = -1;
-		igb_update_rx_dca(&adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		q_vector->cpu = -1;
+		igb_update_dca(q_vector);
 	}
 }
 
@@ -3877,6 +4235,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
 {
 	struct net_device *netdev = dev_get_drvdata(dev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	unsigned long event = *(unsigned long *)data;
 
@@ -3885,12 +4244,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
 		/* if already enabled, don't do it again */
 		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 			break;
-		/* Always use CB2 mode, difference is masked
-		 * in the CB driver. */
-		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
 		if (dca_add_requester(dev) == 0) {
 			adapter->flags |= IGB_FLAG_DCA_ENABLED;
-			dev_info(&adapter->pdev->dev, "DCA enabled\n");
+			dev_info(&pdev->dev, "DCA enabled\n");
 			igb_setup_dca(adapter);
 			break;
 		}
@@ -3898,9 +4254,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
 	case DCA_PROVIDER_REMOVE:
 		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
 			/* without this a class_device is left
- 			 * hanging around in the sysfs model */
+			 * hanging around in the sysfs model */
 			dca_remove_requester(dev);
-			dev_info(&adapter->pdev->dev, "DCA disabled\n");
+			dev_info(&pdev->dev, "DCA disabled\n");
 			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
 			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
 		}
@@ -3930,12 +4286,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
 
 	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
 		ping = E1000_PF_CONTROL_MSG;
-		if (adapter->vf_data[i].clear_to_send)
+		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
 			ping |= E1000_VT_MSGTYPE_CTS;
 		igb_write_mbx(hw, &ping, 1, i);
 	}
 }
 
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr = rd32(E1000_VMOLR(vf));
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+	                    IGB_VF_FLAG_MULTI_PROMISC);
+	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+		vmolr |= E1000_VMOLR_MPME;
+		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+	} else {
+		/*
+		 * if we have hashes and we are clearing a multicast promisc
+		 * flag we need to write the hashes to the MTA as this step
+		 * was previously skipped
+		 */
+		if (vf_data->num_vf_mc_hashes > 30) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else if (vf_data->num_vf_mc_hashes) {
+			int j;
+			vmolr |= E1000_VMOLR_ROMPE;
+			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+		}
+	}
+
+	wr32(E1000_VMOLR(vf), vmolr);
+
+	/* there are flags left unprocessed, likely not supported */
+	if (*msgbuf & E1000_VT_MSGINFO_MASK)
+		return -EINVAL;
+
+	return 0;
+}
+
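
A minimal sketch of the decision the handler above makes, assuming the flag semantics shown in the diff: grant full multicast promiscuity (MPME) when the VF asks for it or when it has stored more than the 30 hashes the table can hold, otherwise fall back to hash filtering (ROMPE). The enum values and struct layout below are placeholders, not the driver's definitions.

#include <stdint.h>

enum vmolr_bits {
	VMOLR_ROMPE = 1u << 0,	/* accept packets matching the stored hashes */
	VMOLR_MPME  = 1u << 1,	/* accept all multicast packets */
};

struct vf_mc_state {
	int wants_mc_promisc;		/* VF requested multicast promisc */
	unsigned int num_mc_hashes;	/* multicast hashes salted away for it */
};

static uint32_t vf_multicast_mode(const struct vf_mc_state *vf)
{
	if (vf->wants_mc_promisc)
		return VMOLR_MPME;
	if (vf->num_mc_hashes > 30)	/* table only holds 30 hash values */
		return VMOLR_MPME;
	if (vf->num_mc_hashes)
		return VMOLR_ROMPE;
	return 0;			/* no multicast for this VF */
}
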
 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
 				  u32 *msgbuf, u32 vf)
 {
@@ -3944,18 +4339,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	int i;
 
-	/* only up to 30 hash values supported */
-	if (n > 30)
-		n = 30;
-
-	/* salt away the number of multi cast addresses assigned
+	/* salt away the number of multicast addresses assigned
 	 * to this VF for later use to restore when the PF multi cast
 	 * list changes
 	 */
 	vf_data->num_vf_mc_hashes = n;
 
-	/* VFs are limited to using the MTA hash table for their multicast
-	 * addresses */
+	/* only up to 30 hash values supported */
+	if (n > 30)
+		n = 30;
+
+	/* store the hashes for later use */
 	for (i = 0; i < n; i++)
 		vf_data->vf_mc_hashes[i] = hash_list[i];
 
@@ -3972,9 +4366,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 	int i, j;
 
 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
+		u32 vmolr = rd32(E1000_VMOLR(i));
+		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
 		vf_data = &adapter->vf_data[i];
-		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
-			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+
+		if ((vf_data->num_vf_mc_hashes > 30) ||
+		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else if (vf_data->num_vf_mc_hashes) {
+			vmolr |= E1000_VMOLR_ROMPE;
+			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+		}
+		wr32(E1000_VMOLR(i), vmolr);
 	}
 }
 
@@ -4012,7 +4417,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg, i;
 
-	/* It is an error to call this function when VFs are not enabled */
+	/* The vlvf table only exists on 82576 hardware and newer */
+	if (hw->mac.type < e1000_82576)
+		return -1;
+
+	/* we only need to do this if VMDq is enabled */
 	if (!adapter->vfs_allocated_count)
 		return -1;
 
@@ -4042,16 +4451,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
 			/* if !enabled we need to set this up in vfta */
 			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
-				/* add VID to filter table, if bit already set
-				 * PF must have added it outside of table */
-				if (igb_vfta_set(hw, vid, true))
-					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
-						adapter->vfs_allocated_count);
+				/* add VID to filter table */
+				igb_vfta_set(hw, vid, true);
 				reg |= E1000_VLVF_VLANID_ENABLE;
 			}
 			reg &= ~E1000_VLVF_VLANID_MASK;
 			reg |= vid;
-
 			wr32(E1000_VLVF(i), reg);
 
 			/* do not modify RLPML for PF devices */
@@ -4067,8 +4472,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 				reg |= size;
 				wr32(E1000_VMOLR(vf), reg);
 			}
-			adapter->vf_data[vf].vlans_enabled++;
 
+			adapter->vf_data[vf].vlans_enabled++;
 			return 0;
 		}
 	} else {
@@ -4110,15 +4515,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 	return igb_vlvf_set(adapter, vid, add, vf);
 }
 
-static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
 {
-	struct e1000_hw *hw = &adapter->hw;
-
-	/* disable mailbox functionality for vf */
-	adapter->vf_data[vf].clear_to_send = false;
+	/* clear all flags */
+	adapter->vf_data[vf].flags = 0;
+	adapter->vf_data[vf].last_nack = jiffies;
 
 	/* reset offloads to defaults */
-	igb_set_vmolr(hw, vf);
+	igb_set_vmolr(adapter, vf);
 
 	/* reset vlans for device */
 	igb_clear_vf_vfta(adapter, vf);
@@ -4130,7 +4534,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
 	igb_set_rx_mode(adapter->netdev);
 }
 
-static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+	/* generate a new mac address as we were hotplug removed/added */
+	random_ether_addr(vf_mac);
+
+	/* process remaining reset events */
+	igb_vf_reset(adapter, vf);
+}
+
+static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4139,11 +4554,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 	u8 *addr = (u8 *)(&msgbuf[1]);
 
 	/* process all the same items cleared in a function level reset */
-	igb_vf_reset_event(adapter, vf);
+	igb_vf_reset(adapter, vf);
 
 	/* set vf mac address */
-	igb_rar_set(hw, vf_mac, rar_entry);
-	igb_set_rah_pool(hw, vf, rar_entry);
+	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
 
 	/* enable transmit and receive for vf */
 	reg = rd32(E1000_VFTE);
@@ -4151,8 +4565,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 	reg = rd32(E1000_VFRE);
 	wr32(E1000_VFRE, reg | (1 << vf));
 
-	/* enable mailbox functionality for vf */
-	adapter->vf_data[vf].clear_to_send = true;
+	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
 
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4162,66 +4575,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 
 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
 {
-		unsigned char *addr = (char *)&msg[1];
-		int err = -1;
+	unsigned char *addr = (char *)&msg[1];
+	int err = -1;
 
-		if (is_valid_ether_addr(addr))
-			err = igb_set_vf_mac(adapter, vf, addr);
-
-		return err;
+	if (is_valid_ether_addr(addr))
+		err = igb_set_vf_mac(adapter, vf, addr);
 
+	return err;
 }
 
 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	u32 msg = E1000_VT_MSGTYPE_NACK;
 
 	/* if device isn't clear to send it shouldn't be reading either */
-	if (!adapter->vf_data[vf].clear_to_send)
+	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
 		igb_write_mbx(hw, &msg, 1, vf);
-}
-
-
-static void igb_msg_task(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u32 vf;
-
-	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
-		/* process any reset requests */
-		if (!igb_check_for_rst(hw, vf)) {
-			adapter->vf_data[vf].clear_to_send = false;
-			igb_vf_reset_event(adapter, vf);
-		}
-
-		/* process any messages pending */
-		if (!igb_check_for_msg(hw, vf))
-			igb_rcv_msg_from_vf(adapter, vf);
-
-		/* process any acks */
-		if (!igb_check_for_ack(hw, vf))
-			igb_rcv_ack_from_vf(adapter, vf);
-
+		vf_data->last_nack = jiffies;
 	}
 }
 
-static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 {
-	u32 mbx_size = E1000_VFMAILBOX_SIZE;
-	u32 msgbuf[mbx_size];
+	struct pci_dev *pdev = adapter->pdev;
+	u32 msgbuf[E1000_VFMAILBOX_SIZE];
 	struct e1000_hw *hw = &adapter->hw;
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	s32 retval;
 
-	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
+	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
 
 	if (retval)
-		dev_err(&adapter->pdev->dev,
-		        "Error receiving message from VF\n");
+		dev_err(&pdev->dev, "Error receiving message from VF\n");
 
 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
-		return retval;
+		return;
 
 	/*
 	 * until the vf completes a reset it should not be
@@ -4230,20 +4622,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 
 	if (msgbuf[0] == E1000_VF_RESET) {
 		igb_vf_reset_msg(adapter, vf);
-
-		return retval;
+		return;
 	}
 
-	if (!adapter->vf_data[vf].clear_to_send) {
-		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
-		igb_write_mbx(hw, msgbuf, 1, vf);
-		return retval;
+	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
+		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+			igb_write_mbx(hw, msgbuf, 1, vf);
+			vf_data->last_nack = jiffies;
+		}
+		return;
 	}
 
 	switch ((msgbuf[0] & 0xFFFF)) {
 	case E1000_VF_SET_MAC_ADDR:
 		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
 		break;
+	case E1000_VF_SET_PROMISC:
+		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+		break;
 	case E1000_VF_SET_MULTICAST:
 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
 		break;
@@ -4254,7 +4651,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
 		break;
 	default:
-		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
 		retval = -1;
 		break;
 	}
@@ -4268,8 +4665,53 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
 
 	igb_write_mbx(hw, msgbuf, 1, vf);
+}
 
-	return retval;
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vf;
+
+	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+		/* process any reset requests */
+		if (!igb_check_for_rst(hw, vf))
+			igb_vf_reset_event(adapter, vf);
+
+		/* process any messages pending */
+		if (!igb_check_for_msg(hw, vf))
+			igb_rcv_msg_from_vf(adapter, vf);
+
+		/* process any acks */
+		if (!igb_check_for_ack(hw, vf))
+			igb_rcv_ack_from_vf(adapter, vf);
+	}
+}
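
The mailbox handlers above only NACK a VF that is not clear-to-send once every two seconds (time_after(jiffies, last_nack + 2 * HZ)). The same rate-limit pattern, sketched with a plain monotonic tick counter standing in for jiffies/HZ:

#include <stdbool.h>
#include <stdint.h>

#define TICKS_PER_SEC 1000u	/* assumed tick rate, stands in for HZ */

struct nack_state {
	uint64_t last_nack;	/* tick at which the previous NACK was sent */
};

static bool should_send_nack(struct nack_state *st, uint64_t now)
{
	/* allow at most one NACK per VF every two seconds */
	if (now - st->last_nack < 2 * TICKS_PER_SEC)
		return false;
	st->last_nack = now;
	return true;
}
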
+
+/**
+ *  igb_set_uta - Set unicast filter table address
+ *  @adapter: board private structure
+ *
+ *  The unicast table address is a register array of 32-bit registers.
+ *  The table is meant to be used in a way similar to how the MTA is used;
+ *  however, due to certain limitations in the hardware it is necessary to
+ *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
+ **/
+static void igb_set_uta(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int i;
+
+	/* The UTA table only exists on 82576 hardware and newer */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	/* we only need to do this if VMDq is enabled */
+	if (!adapter->vfs_allocated_count)
+		return;
+
+	for (i = 0; i < hw->mac.uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, ~0);
 }
 
 /**
@@ -4279,15 +4721,15 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
  **/
 static irqreturn_t igb_intr_msi(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	struct e1000_hw *hw = &adapter->hw;
 	/* read ICR disables interrupts using IAM */
 	u32 icr = rd32(E1000_ICR);
 
-	igb_write_itr(adapter->rx_ring);
+	igb_write_itr(q_vector);
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -4298,7 +4740,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	napi_schedule(&adapter->rx_ring[0].napi);
+	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
@@ -4310,8 +4752,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
  **/
 static irqreturn_t igb_intr(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	struct e1000_hw *hw = &adapter->hw;
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
 	 * need for the IMC write */
@@ -4319,14 +4761,14 @@ static irqreturn_t igb_intr(int irq, void *data)
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
-	igb_write_itr(adapter->rx_ring);
+	igb_write_itr(q_vector);
 
 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 	 * not set, then the adapter didn't send an interrupt */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -4338,26 +4780,27 @@ static irqreturn_t igb_intr(int irq, void *data)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	napi_schedule(&adapter->rx_ring[0].napi);
+	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
 
-static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
+static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (adapter->itr_setting & 3) {
-		if (adapter->num_rx_queues == 1)
+	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
+		if (!adapter->msix_entries)
 			igb_set_itr(adapter);
 		else
-			igb_update_ring_itr(rx_ring);
+			igb_update_ring_itr(q_vector);
 	}
 
 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
 		if (adapter->msix_entries)
-			wr32(E1000_EIMS, rx_ring->eims_value);
+			wr32(E1000_EIMS, q_vector->eims_value);
 		else
 			igb_irq_enable(adapter);
 	}
@@ -4370,76 +4813,101 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
  **/
 static int igb_poll(struct napi_struct *napi, int budget)
 {
-	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
-	int work_done = 0;
+	struct igb_q_vector *q_vector = container_of(napi,
+	                                             struct igb_q_vector,
+	                                             napi);
+	int tx_clean_complete = 1, work_done = 0;
 
 #ifdef CONFIG_IGB_DCA
-	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_rx_dca(rx_ring);
+	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+		igb_update_dca(q_vector);
 #endif
-	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
+	if (q_vector->tx_ring)
+		tx_clean_complete = igb_clean_tx_irq(q_vector);
 
-	if (rx_ring->buddy) {
-#ifdef CONFIG_IGB_DCA
-		if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-			igb_update_tx_dca(rx_ring->buddy);
-#endif
-		if (!igb_clean_tx_irq(rx_ring->buddy))
-			work_done = budget;
-	}
+	if (q_vector->rx_ring)
+		igb_clean_rx_irq_adv(q_vector, &work_done, budget);
+
+	if (!tx_clean_complete)
+		work_done = budget;
 
 	/* If not enough Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
-		igb_rx_irq_enable(rx_ring);
+		igb_ring_irq_enable(q_vector);
 	}
 
 	return work_done;
 }
 
 /**
- * igb_hwtstamp - utility function which checks for TX time stamp
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
  * @adapter: board private structure
+ * @shhwtstamps: timestamp structure to update
+ * @regval: unsigned 64-bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions
+ */
+static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                   struct skb_shared_hwtstamps *shhwtstamps,
+                                   u64 regval)
+{
+	u64 ns;
+
+	/*
+	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL; shift this up by
+	 * 24 bits to match the clock shift we set up earlier.
+	 */
+	if (adapter->hw.mac.type == e1000_82580)
+		regval <<= IGB_82580_TSYNC_SHIFT;
+
+	ns = timecounter_cyc2time(&adapter->clock, regval);
+	timecompare_update(&adapter->compare, ns);
+	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
+}
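
A sketch of how the raw value handed to the helper above is assembled by its callers from the two 32-bit stamp registers, with the extra 82580 shift applied; register reads are replaced by plain parameters and the shift constant is assumed to match IGB_82580_TSYNC_SHIFT.

#include <stdint.h>

#define TSYNC_SHIFT_82580 24	/* assumed equal to IGB_82580_TSYNC_SHIFT */

static uint64_t raw_timestamp(uint32_t stmp_lo, uint32_t stmp_hi, int is_82580)
{
	uint64_t regval = stmp_lo;

	regval |= (uint64_t)stmp_hi << 32;	/* high half lands in bits 63:32 */
	if (is_82580)
		regval <<= TSYNC_SHIFT_82580;	/* 1 ns units -> timecounter units */
	return regval;
}
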
+
+/**
+ * igb_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: pointer to q_vector containing needed info
  * @skb: packet that was just sent
  *
  * If we were asked to do hardware stamping and such a time stamp is
  * available, then it must have been for this skb here because we only
  * allow one such packet into the queue.
  */
-static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 {
+	struct igb_adapter *adapter = q_vector->adapter;
 	union skb_shared_tx *shtx = skb_tx(skb);
 	struct e1000_hw *hw = &adapter->hw;
+	struct skb_shared_hwtstamps shhwtstamps;
+	u64 regval;
 
-	if (unlikely(shtx->hardware)) {
-		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
-		if (valid) {
-			u64 regval = rd32(E1000_TXSTMPL);
-			u64 ns;
-			struct skb_shared_hwtstamps shhwtstamps;
-
-			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-			ns = timecounter_cyc2time(&adapter->clock,
-						  regval);
-			timecompare_update(&adapter->compare, ns);
-			shhwtstamps.hwtstamp = ns_to_ktime(ns);
-			shhwtstamps.syststamp =
-				timecompare_transform(&adapter->compare, ns);
-			skb_tstamp_tx(skb, &shhwtstamps);
-		}
-	}
+	/* if skb does not support hw timestamp or TX stamp not valid exit */
+	if (likely(!shtx->hardware) ||
+	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
+		return;
+
+	regval = rd32(E1000_TXSTMPL);
+	regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+	skb_tstamp_tx(skb, &shhwtstamps);
 }
 
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: pointer to q_vector containing needed info
  * returns true if ring is completely cleaned
  **/
-static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *tx_ring = q_vector->tx_ring;
+	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4470,10 +4938,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 				total_packets += segs;
 				total_bytes += bytecount;
 
-				igb_tx_hwtstamp(adapter, skb);
+				igb_tx_hwtstamp(q_vector, skb);
 			}
 
-			igb_unmap_and_free_tx_resource(adapter, buffer_info);
+			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 			tx_desc->wb.status = 0;
 
 			i++;
@@ -4496,7 +4964,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++adapter->restart_queue;
+			tx_ring->tx_stats.restart_queue++;
 		}
 	}
 
@@ -4506,12 +4974,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 		tx_ring->detect_tx_hung = false;
 		if (tx_ring->buffer_info[i].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
-			       (adapter->tx_timeout_factor * HZ))
-		    && !(rd32(E1000_STATUS) &
-			 E1000_STATUS_TXOFF)) {
+			       (adapter->tx_timeout_factor * HZ)) &&
+		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			dev_err(&adapter->pdev->dev,
+			dev_err(&tx_ring->pdev->dev,
 				"Detected Tx Unit Hang\n"
 				"  Tx Queue             <%d>\n"
 				"  TDH                  <%x>\n"
@@ -4524,11 +4991,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 				"  jiffies              <%lx>\n"
 				"  desc.status          <%x>\n",
 				tx_ring->queue_index,
-				readl(adapter->hw.hw_addr + tx_ring->head),
-				readl(adapter->hw.hw_addr + tx_ring->tail),
+				readl(tx_ring->head),
+				readl(tx_ring->tail),
 				tx_ring->next_to_use,
 				tx_ring->next_to_clean,
-				tx_ring->buffer_info[i].time_stamp,
+				tx_ring->buffer_info[eop].time_stamp,
 				eop,
 				jiffies,
 				eop_desc->wb.status);
@@ -4539,43 +5006,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 	tx_ring->total_packets += total_packets;
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
-	adapter->net_stats.tx_bytes += total_bytes;
-	adapter->net_stats.tx_packets += total_packets;
 	return (count < tx_ring->count);
 }
 
 /**
  * igb_receive_skb - helper function to handle rx indications
- * @ring: pointer to receive ring receving this packet
- * @status: descriptor status field as written by hardware
- * @rx_desc: receive descriptor containing vlan and type information.
- * @skb: pointer to sk_buff to be indicated to stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
  **/
-static void igb_receive_skb(struct igb_ring *ring, u8 status,
-                            union e1000_adv_rx_desc * rx_desc,
-                            struct sk_buff *skb)
-{
-	struct igb_adapter * adapter = ring->adapter;
-	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
-
-	skb_record_rx_queue(skb, ring->queue_index);
-	if (vlan_extracted)
-		vlan_gro_receive(&ring->napi, adapter->vlgrp,
-		                 le16_to_cpu(rx_desc->wb.upper.vlan),
-		                 skb);
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+
+	if (vlan_tag)
+		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
+		                 vlan_tag, skb);
 	else
-		napi_gro_receive(&ring->napi, skb);
+		napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if ((status_err & E1000_RXD_STAT_IXSM) ||
-	    (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
+	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+	     (status_err & E1000_RXD_STAT_IXSM))
 		return;
+
 	/* TCP/UDP checksum error bit is set */
 	if (status_err &
 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4584,9 +5046,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
 		 * packets, (aka let the stack check the crc32c)
 		 */
-		if (!((adapter->hw.mac.type == e1000_82576) &&
-		      (skb->len == 60)))
-			adapter->hw_csum_err++;
+		if ((skb->len == 60) &&
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+			ring->rx_stats.csum_err++;
+
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -4594,11 +5057,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
-	adapter->hw_csum_good++;
+	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+                                   struct sk_buff *skb)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	u64 regval;
+
+	/*
+	 * If this bit is set, then the RX registers contain the time stamp. No
+	 * other packet will be time stamped until we read these registers, so
+	 * read the registers to make them available again. Because only one
+	 * packet can be time stamped at a time, we know that the register
+	 * values must belong to this one here and therefore we don't need to
+	 * compare any of the additional attributes stored for it.
+	 *
+	 * If nothing went wrong, then it should have a skb_shared_tx that we
+	 * can turn into a skb_shared_hwtstamps.
+	 */
+	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
+		return;
+	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+		return;
+
+	regval = rd32(E1000_RXSTMPL);
+	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+
+	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc)
 {
 	/* HW will not DMA in data larger than the given buffer, even if it
@@ -4607,27 +5097,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
 	 */
 	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
 	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > adapter->rx_ps_hdr_size)
-		hlen = adapter->rx_ps_hdr_size;
+	if (hlen > rx_ring->rx_buffer_len)
+		hlen = rx_ring->rx_buffer_len;
 	return hlen;
 }
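
The header-split length above is just a masked, shifted bitfield clamped to the buffer size. A generic sketch of that extraction, with the mask and shift passed in as parameters rather than the E1000_RXDADV_* constants:

#include <stdint.h>

static uint16_t split_hdr_len(uint16_t hdr_info, uint16_t mask,
			      unsigned int shift, uint16_t buf_len)
{
	uint16_t hlen = (hdr_info & mask) >> shift;

	/* hardware never DMAs more than the buffer, but clamp defensively */
	if (hlen > buf_len)
		hlen = buf_len;
	return hlen;
}
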
 
-static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
-				 int *work_done, int budget)
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
+                                 int *work_done, int budget)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
-	struct e1000_hw *hw = &adapter->hw;
-	struct pci_dev *pdev = adapter->pdev;
+	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct net_device *netdev = rx_ring->netdev;
+	struct pci_dev *pdev = rx_ring->pdev;
 	union e1000_adv_rx_desc *rx_desc, *next_rxd;
 	struct igb_buffer *buffer_info, *next_buffer;
 	struct sk_buff *skb;
 	bool cleaned = false;
 	int cleaned_count = 0;
+	int current_node = numa_node_id();
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int i;
 	u32 staterr;
 	u16 length;
+	u16 vlan_tag;
 
 	i = rx_ring->next_to_clean;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4646,6 +5137,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
+
 		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
 		prefetch(next_rxd);
 		next_buffer = &rx_ring->buffer_info[i];
@@ -4654,23 +5146,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 		cleaned = true;
 		cleaned_count++;
 
-		/* this is the fast path for the non-packet split case */
-		if (!adapter->rx_ps_hdr_size) {
-			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
-			buffer_info->dma = 0;
-			skb_put(skb, length);
-			goto send_up;
-		}
-
 		if (buffer_info->dma) {
-			u16 hlen = igb_get_hlen(adapter, rx_desc);
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_ps_hdr_size,
+					 rx_ring->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
-			skb_put(skb, hlen);
+			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
+				skb_put(skb, length);
+				goto send_up;
+			}
+			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
 		}
 
 		if (length) {
@@ -4683,15 +5168,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 						buffer_info->page_offset,
 						length);
 
-			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-			    (page_count(buffer_info->page) != 1))
+			if ((page_count(buffer_info->page) != 1) ||
+			    (page_to_nid(buffer_info->page) != current_node))
 				buffer_info->page = NULL;
 			else
 				get_page(buffer_info->page);
 
 			skb->len += length;
 			skb->data_len += length;
-
 			skb->truesize += length;
 		}
 
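
The recycling check in the hunk above also looks at which NUMA node the half-page came from before keeping it for reuse. Reduced to a standalone test it is roughly the sketch below (illustrative only; the example_* name is not part of the driver):

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/topology.h>

/* Reuse the receive page only if nobody else holds a reference and it was
 * allocated on the node that is currently running the clean-up routine. */
static bool example_can_reuse_rx_page(struct page *page)
{
	return (page_count(page) == 1) &&
	       (page_to_nid(page) == numa_node_id());
}
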
@@ -4703,60 +5187,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 			goto next_desc;
 		}
 send_up:
-		/*
-		 * If this bit is set, then the RX registers contain
-		 * the time stamp. No other packet will be time
-		 * stamped until we read these registers, so read the
-		 * registers to make them available again. Because
-		 * only one packet can be time stamped at a time, we
-		 * know that the register values must belong to this
-		 * one here and therefore we don't need to compare
-		 * any of the additional attributes stored for it.
-		 *
-		 * If nothing went wrong, then it should have a
-		 * skb_shared_tx that we can turn into a
-		 * skb_shared_hwtstamps.
-		 *
-		 * TODO: can time stamping be triggered (thus locking
-		 * the registers) without the packet reaching this point
-		 * here? In that case RX time stamping would get stuck.
-		 *
-		 * TODO: in "time stamp all packets" mode this bit is
-		 * not set. Need a global flag for this mode and then
-		 * always read the registers. Cannot be done without
-		 * a race condition.
-		 */
-		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
-			u64 regval;
-			u64 ns;
-			struct skb_shared_hwtstamps *shhwtstamps =
-				skb_hwtstamps(skb);
-
-			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
-			     "igb: no RX time stamp available for time stamped packet");
-			regval = rd32(E1000_RXSTMPL);
-			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-			ns = timecounter_cyc2time(&adapter->clock, regval);
-			timecompare_update(&adapter->compare, ns);
-			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-			shhwtstamps->hwtstamp = ns_to_ktime(ns);
-			shhwtstamps->syststamp =
-				timecompare_transform(&adapter->compare, ns);
-		}
-
 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
+		igb_rx_hwtstamp(q_vector, staterr, skb);
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
+		skb_record_rx_queue(skb, rx_ring->queue_index);
 
-		igb_receive_skb(rx_ring, staterr, rx_desc, skb);
+		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
+		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+
+		igb_receive_skb(q_vector, skb, vlan_tag);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -4783,8 +5231,6 @@ next_desc:
 	rx_ring->total_bytes += total_bytes;
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
-	adapter->net_stats.rx_bytes += total_bytes;
-	adapter->net_stats.rx_packets += total_packets;
 	return cleaned;
 }
 
@@ -4792,12 +5238,9 @@ next_desc:
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-				     int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = rx_ring->netdev;
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4807,19 +5250,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	if (adapter->rx_ps_hdr_size)
-		bufsz = adapter->rx_ps_hdr_size;
-	else
-		bufsz = adapter->rx_buffer_len;
+	bufsz = rx_ring->rx_buffer_len;
 
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
-				buffer_info->page = alloc_page(GFP_ATOMIC);
+				buffer_info->page = netdev_alloc_page(netdev);
 				if (!buffer_info->page) {
-					adapter->alloc_rx_buff_failed++;
+					rx_ring->rx_stats.alloc_failed++;
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -4827,39 +5267,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
-				pci_map_page(pdev, buffer_info->page,
+				pci_map_page(rx_ring->pdev, buffer_info->page,
 					     buffer_info->page_offset,
 					     PAGE_SIZE / 2,
 					     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(rx_ring->pdev,
+			                          buffer_info->page_dma)) {
+				buffer_info->page_dma = 0;
+				rx_ring->rx_stats.alloc_failed++;
+				goto no_buffers;
+			}
 		}
 
-		if (!buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+		skb = buffer_info->skb;
+		if (!skb) {
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 
-			/* Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			buffer_info->skb = skb;
-			buffer_info->dma = pci_map_single(pdev, skb->data,
+		}
+		if (!buffer_info->dma) {
+			buffer_info->dma = pci_map_single(rx_ring->pdev,
+			                                  skb->data,
 							  bufsz,
 							  PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(rx_ring->pdev,
+			                          buffer_info->dma)) {
+				buffer_info->dma = 0;
+				rx_ring->rx_stats.alloc_failed++;
+				goto no_buffers;
+			}
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
-		if (adapter->rx_ps_hdr_size) {
+		if (bufsz < IGB_RXBUFFER_1024) {
 			rx_desc->read.pkt_addr =
 			     cpu_to_le64(buffer_info->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
 		} else {
-			rx_desc->read.pkt_addr =
-			     cpu_to_le64(buffer_info->dma);
+			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
 			rx_desc->read.hdr_addr = 0;
 		}
 
@@ -4882,7 +5331,7 @@ no_buffers:
 		 * applicable for weak-ordered memory model archs,
 		 * such as IA-64). */
 		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		writel(i, rx_ring->tail);
 	}
 }
 
@@ -4941,13 +5390,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct hwtstamp_config config;
-	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
-	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
-	u32 tsync_rx_ctl_type = 0;
+	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
 	u32 tsync_rx_cfg = 0;
-	int is_l4 = 0;
-	int is_l2 = 0;
-	short port = 319; /* PTP */
+	bool is_l4 = false;
+	bool is_l2 = false;
 	u32 regval;
 
 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -4959,10 +5406,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 
 	switch (config.tx_type) {
 	case HWTSTAMP_TX_OFF:
-		tsync_tx_ctl_bit = 0;
-		break;
+		tsync_tx_ctl = 0;
 	case HWTSTAMP_TX_ON:
-		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
 		break;
 	default:
 		return -ERANGE;
@@ -4970,7 +5415,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 
 	switch (config.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
-		tsync_rx_ctl_bit = 0;
+		tsync_rx_ctl = 0;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -4981,86 +5426,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 		 * possible to time stamp both Sync and Delay_Req messages
 		 * => fall back to time stamping all packets
 		 */
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
-		is_l4 = 1;
+		is_l4 = true;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
-		is_l4 = 1;
+		is_l4 = true;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
-		is_l2 = 1;
-		is_l4 = 1;
+		is_l2 = true;
+		is_l4 = true;
 		config.rx_filter = HWTSTAMP_FILTER_SOME;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
-		is_l2 = 1;
-		is_l4 = 1;
+		is_l2 = true;
+		is_l4 = true;
 		config.rx_filter = HWTSTAMP_FILTER_SOME;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-		is_l2 = 1;
+		is_l2 = true;
 		break;
 	default:
 		return -ERANGE;
 	}
 
+	if (hw->mac.type == e1000_82575) {
+		if (tsync_rx_ctl | tsync_tx_ctl)
+			return -EINVAL;
+		return 0;
+	}
+
 	/* enable/disable TX */
 	regval = rd32(E1000_TSYNCTXCTL);
-	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
+	regval &= ~E1000_TSYNCTXCTL_ENABLED;
+	regval |= tsync_tx_ctl;
 	wr32(E1000_TSYNCTXCTL, regval);
 
-	/* enable/disable RX, define which PTP packets are time stamped */
+	/* enable/disable RX */
 	regval = rd32(E1000_TSYNCRXCTL);
-	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
-	regval = (regval & ~0xE) | tsync_rx_ctl_type;
+	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+	regval |= tsync_rx_ctl;
 	wr32(E1000_TSYNCRXCTL, regval);
-	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
 
-	/*
-	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
-	 *                                          (Ethertype to filter on)
-	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
-	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
-	 */
-	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
-
-	/* L4 Queue Filter[0]: only filter by source and destination port */
-	wr32(E1000_SPQF0, htons(port));
-	wr32(E1000_IMIREXT(0), is_l4 ?
-	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
-	wr32(E1000_IMIR(0), is_l4 ?
-	     (htons(port)
-	      | (0<<16) /* immediate interrupt disabled */
-	      | 0 /* (1<<17) bit cleared: do not bypass
-		     destination port check */)
-		: 0);
-	wr32(E1000_FTQF0, is_l4 ?
-	     (0x11 /* UDP */
-	      | (1<<15) /* VF not compared */
-	      | (1<<27) /* Enable Timestamping */
-	      | (7<<28) /* only source port filter enabled,
-			   source/target address and protocol
-			   masked */)
-	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
-				      enabled */));
+	/* define which PTP packets are time stamped */
+	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
 
+	/* define ethertype filter for timestamped packets */
+	if (is_l2)
+		wr32(E1000_ETQF(3),
+		                (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+		                 E1000_ETQF_1588 | /* enable timestamping */
+		                 ETH_P_1588));     /* 1588 eth protocol type */
+	else
+		wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+	/* L4 Queue Filter[3]: filter by destination port and protocol */
+	if (is_l4) {
+		u32 ftqf = (IPPROTO_UDP /* UDP */
+			| E1000_FTQF_VF_BP /* VF not compared */
+			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+			| E1000_FTQF_MASK); /* mask all inputs */
+		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+		wr32(E1000_IMIR(3), htons(PTP_PORT));
+		wr32(E1000_IMIREXT(3),
+		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+		if (hw->mac.type == e1000_82576) {
+			/* enable source port check */
+			wr32(E1000_SPQF(3), htons(PTP_PORT));
+			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+		}
+		wr32(E1000_FTQF(3), ftqf);
+	} else {
+		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+	}
 	wrfl();
 
 	adapter->hwtstamp_config = config;
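
For reference, the request that lands in igb_hwtstamp_ioctl() above is issued from user space through SIOCSHWTSTAMP with a struct hwtstamp_config. A minimal caller might look like this sketch (error handling trimmed; the example_* name is hypothetical):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask the driver to timestamp every transmitted packet and every received
 * PTPv2 event message; the request is handled by igb_hwtstamp_ioctl(). */
static int example_enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
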
@@ -5137,21 +5593,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
 		ctrl |= E1000_CTRL_VME;
 		wr32(E1000_CTRL, ctrl);
 
-		/* enable VLAN receive filtering */
+		/* Disable CFI check */
 		rctl = rd32(E1000_RCTL);
 		rctl &= ~E1000_RCTL_CFIEN;
 		wr32(E1000_RCTL, rctl);
-		igb_update_mng_vlan(adapter);
 	} else {
 		/* disable VLAN tag insert/strip */
 		ctrl = rd32(E1000_CTRL);
 		ctrl &= ~E1000_CTRL_VME;
 		wr32(E1000_CTRL, ctrl);
-
-		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
-			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-		}
 	}
 
 	igb_rlpml_set(adapter);
@@ -5166,16 +5616,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	struct e1000_hw *hw = &adapter->hw;
 	int pf_id = adapter->vfs_allocated_count;
 
-	if ((hw->mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	    (vid == adapter->mng_vlan_id))
-		return;
-
-	/* add vid to vlvf if sr-iov is enabled,
-	 * if that fails add directly to filter table */
-	if (igb_vlvf_set(adapter, vid, true, pf_id))
-		igb_vfta_set(hw, vid, true);
+	/* attempt to add filter to vlvf array */
+	igb_vlvf_set(adapter, vid, true, pf_id);
 
+	/* add the filter since PF can receive vlans w/o entry in vlvf */
+	igb_vfta_set(hw, vid, true);
 }
 
 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5183,6 +5628,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	int pf_id = adapter->vfs_allocated_count;
+	s32 err;
 
 	igb_irq_disable(adapter);
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5190,17 +5636,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		igb_irq_enable(adapter);
 
-	if ((adapter->hw.mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	    (vid == adapter->mng_vlan_id)) {
-		/* release control to f/w */
-		igb_release_hw_control(adapter);
-		return;
-	}
+	/* remove vlan from VLVF table array */
+	err = igb_vlvf_set(adapter, vid, false, pf_id);
 
-	/* remove vid from vlvf if sr-iov is enabled,
-	 * if not in vlvf remove from vfta */
-	if (igb_vlvf_set(adapter, vid, false, pf_id))
+	/* if vid was not present in VLVF just remove it from table */
+	if (err)
 		igb_vfta_set(hw, vid, false);
 }
 
@@ -5220,6 +5660,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 
 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 
 	mac->autoneg = 0;
@@ -5243,8 +5684,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		dev_err(&adapter->pdev->dev,
-			"Unsupported Speed/Duplex configuration\n");
+		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -5266,9 +5706,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	if (netif_running(netdev))
 		igb_close(netdev);
 
-	igb_reset_interrupt_capability(adapter);
-
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -5300,7 +5738,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		wr32(E1000_CTRL, ctrl);
 
 		/* Allow time for pending master requests to run */
-		igb_disable_pcie_master(&adapter->hw);
+		igb_disable_pcie_master(hw);
 
 		wr32(E1000_WUC, E1000_WUC_PME_EN);
 		wr32(E1000_WUFC, wufc);
@@ -5363,9 +5801,7 @@ static int igb_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	igb_set_interrupt_capability(adapter);
-
-	if (igb_alloc_queues(adapter)) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -5417,22 +5853,16 @@ static void igb_netpoll(struct net_device *netdev)
 	int i;
 
 	if (!adapter->msix_entries) {
+		struct igb_q_vector *q_vector = adapter->q_vector[0];
 		igb_irq_disable(adapter);
-		napi_schedule(&adapter->rx_ring[0].napi);
+		napi_schedule(&q_vector->napi);
 		return;
 	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		wr32(E1000_EIMC, tx_ring->eims_value);
-		igb_clean_tx_irq(tx_ring);
-		wr32(E1000_EIMS, tx_ring->eims_value);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		wr32(E1000_EIMC, rx_ring->eims_value);
-		napi_schedule(&rx_ring->napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		wr32(E1000_EIMC, q_vector->eims_value);
+		napi_schedule(&q_vector->napi);
 	}
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5532,6 +5962,33 @@ static void igb_io_resume(struct pci_dev *pdev)
 	igb_get_hw_control(adapter);
 }
 
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+                             u8 qsel)
+{
+	u32 rar_low, rar_high;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+	          ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* Indicate to hardware the Address is Valid. */
+	rar_high |= E1000_RAH_AV;
+
+	if (hw->mac.type == e1000_82575)
+		rar_high |= E1000_RAH_POOL_1 * qsel;
+	else
+		rar_high |= E1000_RAH_POOL_1 << qsel;
+
+	wr32(E1000_RAL(index), rar_low);
+	wrfl();
+	wr32(E1000_RAH(index), rar_high);
+	wrfl();
+}
+
 static int igb_set_vf_mac(struct igb_adapter *adapter,
                           int vf, unsigned char *mac_addr)
 {
@@ -5542,8 +5999,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 
 	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 
-	igb_rar_set(hw, mac_addr, rar_entry);
-	igb_set_rah_pool(hw, vf, rar_entry);
+	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
 
 	return 0;
 }
@@ -5551,19 +6007,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 static void igb_vmm_control(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 reg_data;
+	u32 reg;
 
-	if (!adapter->vfs_allocated_count)
+	/* replication is not supported for 82575 */
+	if (hw->mac.type == e1000_82575)
 		return;
 
-	/* VF's need PF reset indication before they
-	 * can send/receive mail */
-	reg_data = rd32(E1000_CTRL_EXT);
-	reg_data |= E1000_CTRL_EXT_PFRSTD;
-	wr32(E1000_CTRL_EXT, reg_data);
+	/* enable replication vlan tag stripping */
+	reg = rd32(E1000_RPLOLR);
+	reg |= E1000_RPLOLR_STRVLAN;
+	wr32(E1000_RPLOLR, reg);
 
-	igb_vmdq_set_loopback_pf(hw, true);
-	igb_vmdq_set_replication_pf(hw, true);
+	/* notify HW that the MAC is adding vlan tags */
+	reg = rd32(E1000_DTXCTL);
+	reg |= E1000_DTXCTL_VLAN_ADDED;
+	wr32(E1000_DTXCTL, reg);
+
+	if (adapter->vfs_allocated_count) {
+		igb_vmdq_set_loopback_pf(hw, true);
+		igb_vmdq_set_replication_pf(hw, true);
+	} else {
+		igb_vmdq_set_loopback_pf(hw, false);
+		igb_vmdq_set_replication_pf(hw, false);
+	}
 }
 
 /* igb_main.c */
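
The allocation hunks above stop assuming that pci_map_single()/pci_map_page() always succeed and instead check pci_dma_mapping_error() before handing the address to hardware. Boiled down to its essence (a sketch; the example_* helper is not driver code):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map a freshly allocated receive buffer and report failure instead of
 * letting a bogus bus address reach the descriptor ring. */
static int example_map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
				 unsigned int bufsz, dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, bufsz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		*dma = 0;
		return -ENOMEM;	/* caller counts this as an alloc failure */
	}
	return 0;
}
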
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index c68265bd0d1a..8afff07ff559 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -367,16 +367,6 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
 	return *data;
 }
 
-static int igbvf_get_self_test_count(struct net_device *netdev)
-{
-	return IGBVF_TEST_LEN;
-}
-
-static int igbvf_get_stats_count(struct net_device *netdev)
-{
-	return IGBVF_GLOBAL_STATS_LEN;
-}
-
 static void igbvf_diag_test(struct net_device *netdev,
                             struct ethtool_test *eth_test, u64 *data)
 {
@@ -484,6 +474,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
 
 }
 
+static int igbvf_get_sset_count(struct net_device *dev, int stringset)
+{
+	switch(stringset) {
+	case ETH_SS_TEST:
+		return IGBVF_TEST_LEN;
+	case ETH_SS_STATS:
+		return IGBVF_GLOBAL_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
 static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
                               u8 *data)
 {
@@ -532,11 +534,10 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= igbvf_set_tso,
 	.self_test		= igbvf_diag_test,
+	.get_sset_count		= igbvf_get_sset_count,
 	.get_strings		= igbvf_get_strings,
 	.phys_id		= igbvf_phys_id,
 	.get_ethtool_stats	= igbvf_get_ethtool_stats,
-	.self_test_count	= igbvf_get_self_test_count,
-	.get_stats_count	= igbvf_get_stats_count,
 	.get_coalesce		= igbvf_get_coalesce,
 	.set_coalesce		= igbvf_set_coalesce,
 };
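
The two removed per-set callbacks are folded into the single get_sset_count() hook, which reports a string-set size per ETH_SS_* identifier. A minimal sketch of the convention (the EXAMPLE_* names are illustrative, not from the driver):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_TEST_LEN	5	/* self-test result slots */
#define EXAMPLE_STATS_LEN	20	/* ethtool statistics entries */

static int example_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return EXAMPLE_TEST_LEN;
	case ETH_SS_STATS:
		return EXAMPLE_STATS_LEN;
	default:
		return -EINVAL;
	}
}
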
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 8e9b67ebbf8b..3d1ee7a8478e 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -117,6 +117,7 @@ struct igbvf_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* Rx */
 		struct {
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 91024a3cdad3..a127620dc653 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 		}
 
 		if (!buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/* Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			buffer_info->skb = skb;
 			buffer_info->dma = pci_map_single(pdev, skb->data,
 			                                  bufsz,
@@ -372,10 +366,20 @@ next_desc:
 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                             struct igbvf_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev,
+				       buffer_info->dma,
+				       buffer_info->length,
+				       PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
@@ -823,8 +827,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 		adapter->detect_tx_hung = false;
 		if (tx_ring->buffer_info[i].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
-		               (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+		               (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
 			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
 			/* detected Tx unit hang */
@@ -1049,7 +1053,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
 	}
 
 	err = request_irq(adapter->msix_entries[vector].vector,
-	                  &igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+	                  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
 	                  netdev);
 	if (err)
 		goto out;
@@ -1059,7 +1063,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
 	vector++;
 
 	err = request_irq(adapter->msix_entries[vector].vector,
-	                  &igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+	                  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
 	                  netdev);
 	if (err)
 		goto out;
@@ -1069,7 +1073,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
 	vector++;
 
 	err = request_irq(adapter->msix_entries[vector].vector,
-	                  &igbvf_msix_other, 0, netdev->name, netdev);
+	                  igbvf_msix_other, 0, netdev->name, netdev);
 	if (err)
 		goto out;
 
@@ -2094,27 +2098,24 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                    unsigned int first)
 {
 	struct igbvf_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
 	unsigned int len = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-
 	buffer_info = &tx_ring->buffer_info[i];
 	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
 	buffer_info->length = len;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = skb_shinfo(skb)->dma_head;
+	buffer_info->dma = pci_map_single(pdev, skb->data, len,
+					  PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		goto dma_error;
+
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
@@ -2131,14 +2132,44 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = map[count];
+		buffer_info->mapped_as_page = true;
+		buffer_info->dma = pci_map_page(pdev,
+						frag->page,
+						frag->page_offset,
+						len,
+						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
 		count++;
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count + 1;
+	return ++count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed buffer_info mapping */
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		igbvf_put_txbuf(adapter, buffer_info);
+	}
+
+	return 0;
 }
 
 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
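
With skb_dma_map()/skb_dma_unmap() gone, each TX buffer now records whether its DMA address came from pci_map_page() (a fragment) or pci_map_single() (the linear head) so it can be released with the matching unmap call, and a mapping failure walks back whatever was already mapped. The unmap choice in isolation (a sketch; example_* is not driver code):

#include <linux/pci.h>
#include <linux/types.h>

/* Release a TX buffer mapping with the API that created it. */
static void example_unmap_tx(struct pci_dev *pdev, dma_addr_t dma,
			     unsigned int len, bool mapped_as_page)
{
	if (!dma)
		return;
	if (mapped_as_page)
		pci_unmap_page(pdev, dma, len, PCI_DMA_TODEVICE);
	else
		pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
}
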
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 9f7b5d4172b8..ba8d246d05a0 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -738,17 +738,12 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)
 
 	IPG_DEBUG_MSG("_get_rxbuff\n");
 
-	skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
 	if (!skb) {
 		sp->rx_buff[entry] = NULL;
 		return -ENOMEM;
 	}
 
-	/* Adjust the data start location within the buffer to
-	 * align IP address field to a 16 byte boundary.
-	 */
-	skb_reserve(skb, NET_IP_ALIGN);
-
 	/* Associate the receive buffer with the IPG NIC. */
 	skb->dev = dev;
 
@@ -1756,7 +1751,7 @@ static int ipg_nic_open(struct net_device *dev)
 	/* Register the interrupt line to be used by the IPG within
 	 * the Linux system.
 	 */
-	rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED,
+	rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
 			 dev->name, dev);
 	if (rc < 0) {
 		printk(KERN_INFO "%s: Error when requesting interrupt.\n",
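
netdev_alloc_skb_ip_align() replaces the open-coded allocate-then-skb_reserve(NET_IP_ALIGN) sequence removed here and in the other drivers above; behaviourally it amounts to roughly the following (simplified sketch; see include/linux/skbuff.h for the real helper):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate an RX skb with NET_IP_ALIGN bytes of headroom so the IP header
 * ends up 16-byte aligned once the 14-byte Ethernet header is pulled. */
static struct sk_buff *example_alloc_rx_skb(struct net_device *dev,
					    unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
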
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index eb424681202d..9b2eebdbb25b 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -353,13 +353,13 @@ static int au1k_irda_start(struct net_device *dev)
 		return retval;
 	}
 
-	if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt, 
+	if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt, 
 					0, dev->name, dev))) {
 		printk(KERN_ERR "%s: unable to get IRQ %d\n", 
 				dev->name, dev->irq);
 		return retval;
 	}
-	if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt, 
+	if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt, 
 					0, dev->name, dev))) {
 		free_irq(AU1000_IRDA_TX_INT, dev);
 		printk(KERN_ERR "%s: unable to get IRQ %d\n", 
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 215adf6377d0..e8e33bb9d876 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -852,7 +852,7 @@ static void irda_usb_receive(struct urb *urb)
 		 * hot unplug of the dongle...
 		 * Lowest effective timer is 10ms...
 		 * Jean II */
-		self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
+		self->rx_defer_timer.function = irda_usb_rx_defer_expired;
 		self->rx_defer_timer.data = (unsigned long) urb;
 		mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
 		return;
@@ -1124,11 +1124,11 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
                  * The actual image starts after the "STMP" keyword
                  * so forward to the firmware header tag
                  */
-                for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG)
-			     && (i < fw->size); i++) ;
+                for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG) &&
+			     (i < fw->size); i++) ;
                 /* here we check for the out of buffer case */
-                if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i])
-                    && (i < STIR421X_PATCH_CODE_OFFSET)) {
+                if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) &&
+                    (i < STIR421X_PATCH_CODE_OFFSET)) {
                         if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
                                     sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
 
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 528767dec9d7..e5698fa30a4f 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -612,16 +612,16 @@ static int fifo_txwait(struct stir_cb *stir, int space)
 		pr_debug("fifo status 0x%lx count %lu\n", status, count);
 
 		/* is fifo receiving already, or empty */
-		if (!(status & FIFOCTL_DIR)
-		    || (status & FIFOCTL_EMPTY))
+		if (!(status & FIFOCTL_DIR) ||
+		    (status & FIFOCTL_EMPTY))
 			return 0;
 
 		if (signal_pending(current))
 			return -EINTR;
 
 		/* shutting down? */
-		if (!netif_running(stir->netdev)
-		    || !netif_device_present(stir->netdev))
+		if (!netif_running(stir->netdev) ||
+		    !netif_device_present(stir->netdev))
 			return -ESHUTDOWN;
 
 		/* only waiting for some space */
@@ -776,8 +776,8 @@ static int stir_transmit_thread(void *arg)
 		}
 
 		/* nothing to send? start receiving */
-		if (!stir->receiving 
-		    && irda_device_txqueue_empty(dev)) {
+		if (!stir->receiving &&
+		    irda_device_txqueue_empty(dev)) {
 			/* Wait otherwise chip gets confused. */
 			if (fifo_txwait(stir, -1))
 				break;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index a5ca71cec028..fddb4efd5453 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1185,8 +1185,8 @@ F01_E */
 		 * if frame size,data ptr,or skb ptr are wrong ,the get next
 		 * entry.
 		 */
-		if ((skb == NULL) || (skb->data == NULL)
-		    || (self->rx_buff.data == NULL) || (len < 6)) {
+		if ((skb == NULL) || (skb->data == NULL) ||
+		    (self->rx_buff.data == NULL) || (len < 6)) {
 			self->netdev->stats.rx_dropped++;
 			return TRUE;
 		}
@@ -1284,8 +1284,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
 		self->RetryCount++;
 
 	if ((self->RetryCount >= 1) ||
-	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize)
-	    || (st_fifo->len >= (MAX_RX_WINDOW))) {
+	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
+	    (st_fifo->len >= (MAX_RX_WINDOW))) {
 		while (st_fifo->len > 0) {	//upload frame
 			// Put this entry back in fifo 
 			if (st_fifo->head > MAX_RX_WINDOW)
@@ -1300,8 +1300,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
 			 * if frame size, data ptr, or skb ptr are wrong,
 			 * then get next entry.
 			 */
-			if ((skb == NULL) || (skb->data == NULL)
-			    || (self->rx_buff.data == NULL) || (len < 6)) {
+			if ((skb == NULL) || (skb->data == NULL) ||
+			    (self->rx_buff.data == NULL) || (len < 6)) {
 				self->netdev->stats.rx_dropped++;
 				continue;
 			}
@@ -1332,8 +1332,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
 		 * if frame is receive complete at this routine ,then upload
 		 * frame.
 		 */
-		if ((GetRXStatus(iobase) & 0x10)
-		    && (RxCurCount(iobase, self) != self->RxLastCount)) {
+		if ((GetRXStatus(iobase) & 0x10) &&
+		    (RxCurCount(iobase, self) != self->RxLastCount)) {
 			upload_rxdata(self, iobase);
 			if (irda_device_txqueue_empty(self->netdev))
 				via_ircc_dma_receive(self);
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 7cfb8b6593c6..bd3c6b5ee76a 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -431,8 +431,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 		memset(rd, 0, sizeof(*rd));
 		rd->hw = hwmap + i;
 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
-		if (rd->buf == NULL
-		    ||  !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+		if (rd->buf == NULL ||
+		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
 			if (rd->buf) {
 				IRDA_ERROR("%s: failed to create PCI-MAP for %p",
 					   __func__, rd->buf);
@@ -955,8 +955,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
 		}
 		for(;;) {
 			do_gettimeofday(&now);
-			if (now.tv_sec > ready.tv_sec
-			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
+			if (now.tv_sec > ready.tv_sec ||
+			    (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
 			    	break;
 			udelay(100);
 			/* must not sleep here - called under netif_tx_lock! */
@@ -1594,8 +1594,8 @@ static int vlsi_irda_init(struct net_device *ndev)
 	 * see include file for details why we need these 2 masks, in this order!
 	 */
 
-	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
-	    || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
+	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
+	    pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
 		IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
 		return -1;
 	}
@@ -1641,8 +1641,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
 		     drivername, pci_name(pdev));
 
-	if ( !pci_resource_start(pdev,0)
-	     || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
+	if ( !pci_resource_start(pdev,0) ||
+	     !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
 		IRDA_ERROR("%s: bar 0 invalid", __func__);
 		goto out_disable;
 	}
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 9706e64e367b..04d0502726c0 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -214,9 +214,9 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
 	 * contains the manufacturer's unique code. That might be a good probe
 	 * method. Ideally you would add additional checks.
 	 */
-	if (inb(ioaddr + 0) != SA_ADDR0
-		||	 inb(ioaddr + 1) != SA_ADDR1
-		||	 inb(ioaddr + 2) != SA_ADDR2)
+	if (inb(ioaddr + 0) != SA_ADDR0 ||
+	    inb(ioaddr + 1) != SA_ADDR1 ||
+	    inb(ioaddr + 2) != SA_ADDR2)
 		goto out;
 
 	if (net_debug  &&  version_printed++ == 0)
@@ -260,7 +260,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
 		dev->irq = 9;
 
 	{
-		int irqval = request_irq(dev->irq, &net_interrupt, 0, cardname, dev);
+		int irqval = request_irq(dev->irq, net_interrupt, 0, cardname, dev);
 		if (irqval) {
 			printk("%s: unable to get IRQ %d (irqval=%d).\n",
 				   dev->name, dev->irq, irqval);
@@ -378,7 +378,7 @@ net_open(struct net_device *dev)
 	 * This is used if the interrupt line can turned off (shared).
 	 * See 3c503.c for an example of selecting the IRQ at config-time.
 	 */
-	if (request_irq(dev->irq, &net_interrupt, 0, cardname, dev)) {
+	if (request_irq(dev->irq, net_interrupt, 0, cardname, dev)) {
 		return -EAGAIN;
 	}
 	/*
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index aa7286bc4364..49997194bdd0 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -604,10 +604,10 @@ static int veth_process_caps(struct veth_lpar_connection *cnx)
 	/* Convert timer to jiffies */
 	cnx->ack_timeout = remote_caps->ack_timeout * HZ / 1000000;
 
-	if ( (remote_caps->num_buffers == 0)
-	     || (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG)
-	     || (remote_caps->ack_threshold == 0)
-	     || (cnx->ack_timeout == 0) ) {
+	if ( (remote_caps->num_buffers == 0) ||
+	     (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) ||
+	     (remote_caps->ack_threshold == 0) ||
+	     (cnx->ack_timeout == 0) ) {
 		veth_error("Received incompatible capabilities from LPAR %d.\n",
 				cnx->remote_lp);
 		return HvLpEvent_Rc_InvalidSubtypeData;
@@ -714,8 +714,8 @@ static void veth_statemachine(struct work_struct *work)
 		cnx->state |= VETH_STATE_OPEN;
 	}
 
-	if ( (cnx->state & VETH_STATE_OPEN)
-	     && !(cnx->state & VETH_STATE_SENTMON) ) {
+	if ( (cnx->state & VETH_STATE_OPEN) &&
+	     !(cnx->state & VETH_STATE_SENTMON) ) {
 		rc = veth_signalevent(cnx, VETH_EVENT_MONITOR,
 				      HvLpEvent_AckInd_DoAck,
 				      HvLpEvent_AckType_DeferredAck,
@@ -724,8 +724,8 @@ static void veth_statemachine(struct work_struct *work)
 		if (rc == HvLpEvent_Rc_Good) {
 			cnx->state |= VETH_STATE_SENTMON;
 		} else {
-			if ( (rc != HvLpEvent_Rc_PartitionDead)
-			     && (rc != HvLpEvent_Rc_PathClosed) )
+			if ( (rc != HvLpEvent_Rc_PartitionDead) &&
+			     (rc != HvLpEvent_Rc_PathClosed) )
 				veth_error("Error sending monitor to LPAR %d, "
 						"rc = %d\n", rlp, rc);
 
@@ -735,8 +735,8 @@ static void veth_statemachine(struct work_struct *work)
 		}
 	}
 
-	if ( (cnx->state & VETH_STATE_OPEN)
-	     && !(cnx->state & VETH_STATE_SENTCAPS)) {
+	if ( (cnx->state & VETH_STATE_OPEN) &&
+	     !(cnx->state & VETH_STATE_SENTCAPS)) {
 		u64 *rawcap = (u64 *)&cnx->local_caps;
 
 		rc = veth_signalevent(cnx, VETH_EVENT_CAP,
@@ -748,8 +748,8 @@ static void veth_statemachine(struct work_struct *work)
 		if (rc == HvLpEvent_Rc_Good) {
 			cnx->state |= VETH_STATE_SENTCAPS;
 		} else {
-			if ( (rc != HvLpEvent_Rc_PartitionDead)
-			     && (rc != HvLpEvent_Rc_PathClosed) )
+			if ( (rc != HvLpEvent_Rc_PartitionDead) &&
+			     (rc != HvLpEvent_Rc_PathClosed) )
 				veth_error("Error sending caps to LPAR %d, "
 						"rc = %d\n", rlp, rc);
 
@@ -759,8 +759,8 @@ static void veth_statemachine(struct work_struct *work)
 		}
 	}
 
-	if ((cnx->state & VETH_STATE_GOTCAPS)
-	    && !(cnx->state & VETH_STATE_SENTCAPACK)) {
+	if ((cnx->state & VETH_STATE_GOTCAPS) &&
+	    !(cnx->state & VETH_STATE_SENTCAPACK)) {
 		struct veth_cap_data *remote_caps = &cnx->remote_caps;
 
 		memcpy(remote_caps, &cnx->cap_event.u.caps_data,
@@ -783,9 +783,9 @@ static void veth_statemachine(struct work_struct *work)
 			goto cant_cope;
 	}
 
-	if ((cnx->state & VETH_STATE_GOTCAPACK)
-	    && (cnx->state & VETH_STATE_GOTCAPS)
-	    && !(cnx->state & VETH_STATE_READY)) {
+	if ((cnx->state & VETH_STATE_GOTCAPACK) &&
+	    (cnx->state & VETH_STATE_GOTCAPS) &&
+	    !(cnx->state & VETH_STATE_READY)) {
 		if (cnx->cap_ack_event.base_event.xRc == HvLpEvent_Rc_Good) {
 			/* Start the ACK timer */
 			cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
@@ -818,8 +818,8 @@ static int veth_init_connection(u8 rlp)
 	struct veth_msg *msgs;
 	int i;
 
-	if ( (rlp == this_lp)
-	     || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
+	if ( (rlp == this_lp) ||
+	     ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
 		return 0;
 
 	cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
@@ -1538,8 +1538,8 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 	cnx->pending_acks[cnx->num_pending_acks++] =
 		event->base_event.xCorrelationToken;
 
-	if ( (cnx->num_pending_acks >= cnx->remote_caps.ack_threshold)
-	     || (cnx->num_pending_acks >= VETH_MAX_ACKS_PER_MSG) )
+	if ( (cnx->num_pending_acks >= cnx->remote_caps.ack_threshold) ||
+	     (cnx->num_pending_acks >= VETH_MAX_ACKS_PER_MSG) )
 		veth_flush_acks(cnx);
 
 	spin_unlock_irqrestore(&cnx->lock, flags);
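
The irda, isa-skeleton and iseries_veth hunks above are purely stylistic: boolean operators move to the end of the broken line, and the redundant '&' before interrupt handlers passed to request_irq() is dropped, since a function name already decays to a function pointer. A throwaway sketch of the latter (example_* names are hypothetical):

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev_id)
{
	/* "example_isr" and "&example_isr" are equivalent here. */
	return request_irq(irq, example_isr, IRQF_SHARED, "example", dev_id);
}
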
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index d85717e3022a..5257ae08b9f9 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -117,6 +117,7 @@ struct ixgb_buffer {
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
+	u16 mapped_as_page;
 };
 
 struct ixgb_desc_ring {
@@ -183,7 +184,6 @@ struct ixgb_adapter {
 	struct napi_struct napi;
 	struct net_device *netdev;
 	struct pci_dev *pdev;
-	struct net_device_stats net_stats;
 
 	/* structs defined in ixgb_hw.h */
 	struct ixgb_hw hw;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 288ee1d0f431..a4ed96caae69 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -34,38 +34,46 @@
 
 #define IXGB_ALL_RAR_ENTRIES 16
 
+enum {NETDEV_STATS, IXGB_STATS};
+
 struct ixgb_stats {
 	char stat_string[ETH_GSTRING_LEN];
+	int type;
 	int sizeof_stat;
 	int stat_offset;
 };
 
-#define IXGB_STAT(m) FIELD_SIZEOF(struct ixgb_adapter, m), \
-		      offsetof(struct ixgb_adapter, m)
+#define IXGB_STAT(m)		IXGB_STATS, \
+				FIELD_SIZEOF(struct ixgb_adapter, m), \
+				offsetof(struct ixgb_adapter, m)
+#define IXGB_NETDEV_STAT(m)	NETDEV_STATS, \
+				FIELD_SIZEOF(struct net_device, m), \
+				offsetof(struct net_device, m)
+
 static struct ixgb_stats ixgb_gstrings_stats[] = {
-	{"rx_packets", IXGB_STAT(net_stats.rx_packets)},
-	{"tx_packets", IXGB_STAT(net_stats.tx_packets)},
-	{"rx_bytes", IXGB_STAT(net_stats.rx_bytes)},
-	{"tx_bytes", IXGB_STAT(net_stats.tx_bytes)},
-	{"rx_errors", IXGB_STAT(net_stats.rx_errors)},
-	{"tx_errors", IXGB_STAT(net_stats.tx_errors)},
-	{"rx_dropped", IXGB_STAT(net_stats.rx_dropped)},
-	{"tx_dropped", IXGB_STAT(net_stats.tx_dropped)},
-	{"multicast", IXGB_STAT(net_stats.multicast)},
-	{"collisions", IXGB_STAT(net_stats.collisions)},
-
-/*	{ "rx_length_errors", IXGB_STAT(net_stats.rx_length_errors) },	*/
-	{"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)},
-	{"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)},
-	{"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)},
+	{"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
+	{"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
+	{"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
+	{"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
+	{"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
+	{"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
+	{"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
+	{"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
+	{"multicast", IXGB_NETDEV_STAT(stats.multicast)},
+	{"collisions", IXGB_NETDEV_STAT(stats.collisions)},
+
+/*	{ "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) },	*/
+	{"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
+	{"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
+	{"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
 	{"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
-	{"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)},
-	{"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)},
-	{"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)},
-	{"tx_carrier_errors", IXGB_STAT(net_stats.tx_carrier_errors)},
-	{"tx_fifo_errors", IXGB_STAT(net_stats.tx_fifo_errors)},
-	{"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
-	{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
+	{"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
+	{"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
+	{"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
+	{"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
+	{"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
+	{"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
+	{"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
 	{"tx_deferred_ok", IXGB_STAT(stats.dc)},
 	{"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
 	{"tx_restart_queue", IXGB_STAT(restart_queue) },
@@ -662,10 +670,21 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int i;
+	char *p = NULL;
 
 	ixgb_update_stats(adapter);
 	for (i = 0; i < IXGB_STATS_LEN; i++) {
-		char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
+		switch (ixgb_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *) netdev +
+					ixgb_gstrings_stats[i].stat_offset;
+			break;
+		case IXGB_STATS:
+			p = (char *) adapter +
+					ixgb_gstrings_stats[i].stat_offset;
+			break;
+		}
+
 		data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
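
The stats table now records, per entry, whether its offset points into struct net_device or into the adapter private struct, and the dump loop picks the matching base pointer. The dispatch in miniature (a sketch with made-up example_* types; the real code also honours sizeof_stat for u32 fields):

#include <linux/types.h>
#include <linux/netdevice.h>

enum { EXAMPLE_NETDEV_STAT, EXAMPLE_PRIV_STAT };

struct example_stat {
	int type;	/* which structure the offset refers to */
	int offset;
};

static u64 example_read_stat(struct net_device *netdev, void *priv,
			     const struct example_stat *s)
{
	char *base = (s->type == EXAMPLE_NETDEV_STAT) ? (char *)netdev
						      : (char *)priv;

	return *(u64 *)(base + s->offset);	/* assumes a u64-sized field */
}
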
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 8aa44dca57eb..bcd0f01d5feb 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -233,7 +233,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 		/* proceed to try to request regular interrupt */
 	}
 
-	err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
+	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
 	                  netdev->name, netdev);
 	if (err) {
 		if (adapter->have_msi)
@@ -892,10 +892,18 @@ static void
 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                 struct ixgb_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev, buffer_info->dma,
+				       buffer_info->length, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev, buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
+
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
@@ -1272,24 +1280,16 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	    unsigned int first)
 {
 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+	struct pci_dev *pdev = adapter->pdev;
 	struct ixgb_buffer *buffer_info;
 	int len = skb_headlen(skb);
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
-
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-
 	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_DATA_PER_TXD);
@@ -1301,11 +1301,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		buffer_info->length = size;
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->time_stamp = jiffies;
-		buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
-			pci_map_single(adapter->pdev,
-				skb->data + offset,
-				size,
-				PCI_DMA_TODEVICE);
+		buffer_info->mapped_as_page = false;
+		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
+						  size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
 		buffer_info->next_to_watch = 0;
 
 		len -= size;
@@ -1323,7 +1323,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = 0;
+		offset = frag->page_offset;
 
 		while (len) {
 			i++;
@@ -1341,7 +1341,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
-			buffer_info->dma = map[f] + offset;
+			buffer_info->mapped_as_page = true;
+			buffer_info->dma =
+				pci_map_page(pdev, frag->page,
+					     offset, size,
+					     PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+				goto dma_error;
 			buffer_info->next_to_watch = 0;
 
 			len -= size;
@@ -1353,6 +1359,22 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+	buffer_info->dma = 0;
+	count--;
+
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	return 0;
 }
 
 static void
@@ -1537,9 +1559,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
 static struct net_device_stats *
 ixgb_get_stats(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev_priv(netdev);
-
-	return &adapter->net_stats;
+	return &netdev->stats;
 }
 
 /**
@@ -1676,16 +1696,16 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
 
 	/* Fill out the OS statistics structure */
 
-	adapter->net_stats.rx_packets = adapter->stats.gprcl;
-	adapter->net_stats.tx_packets = adapter->stats.gptcl;
-	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
-	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
-	adapter->net_stats.multicast = adapter->stats.mprcl;
-	adapter->net_stats.collisions = 0;
+	netdev->stats.rx_packets = adapter->stats.gprcl;
+	netdev->stats.tx_packets = adapter->stats.gptcl;
+	netdev->stats.rx_bytes = adapter->stats.gorcl;
+	netdev->stats.tx_bytes = adapter->stats.gotcl;
+	netdev->stats.multicast = adapter->stats.mprcl;
+	netdev->stats.collisions = 0;
 
 	/* ignore RLEC as it reports errors for padded (<64bytes) frames
 	 * with a length in the type/len field */
-	adapter->net_stats.rx_errors =
+	netdev->stats.rx_errors =
 	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
 	    adapter->stats.ruc +
 	    adapter->stats.roc /*+ adapter->stats.rlec */  +
@@ -1693,21 +1713,21 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
 	    adapter->stats.ecbc + adapter->stats.mpc;
 
 	/* see above
-	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
+	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
 	 */
 
-	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
-	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
-	adapter->net_stats.rx_over_errors = adapter->stats.mpc;
-
-	adapter->net_stats.tx_errors = 0;
-	adapter->net_stats.rx_frame_errors = 0;
-	adapter->net_stats.tx_aborted_errors = 0;
-	adapter->net_stats.tx_carrier_errors = 0;
-	adapter->net_stats.tx_fifo_errors = 0;
-	adapter->net_stats.tx_heartbeat_errors = 0;
-	adapter->net_stats.tx_window_errors = 0;
+	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
+	netdev->stats.rx_missed_errors = adapter->stats.mpc;
+	netdev->stats.rx_over_errors = adapter->stats.mpc;
+
+	netdev->stats.tx_errors = 0;
+	netdev->stats.rx_frame_errors = 0;
+	netdev->stats.tx_aborted_errors = 0;
+	netdev->stats.tx_carrier_errors = 0;
+	netdev->stats.tx_fifo_errors = 0;
+	netdev->stats.tx_heartbeat_errors = 0;
+	netdev->stats.tx_window_errors = 0;
 }
 
 #define IXGB_MAX_INTR 10
@@ -1974,9 +1994,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 		 * of reassembly being done in the stack */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -2059,20 +2078,13 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
-			               + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 385be6016667..8da8eb535084 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -51,11 +51,11 @@
 		__func__ , ## args)))
 
 /* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD		   1024
+#define IXGBE_DEFAULT_TXD		    512
 #define IXGBE_MAX_TXD			   4096
 #define IXGBE_MIN_TXD			     64
 
-#define IXGBE_DEFAULT_RXD		   1024
+#define IXGBE_DEFAULT_RXD		    512
 #define IXGBE_MAX_RXD			   4096
 #define IXGBE_MIN_RXD			     64
 
@@ -106,6 +106,7 @@ struct ixgbe_tx_buffer {
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
+	u16 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
@@ -159,10 +160,13 @@ struct ixgbe_ring {
 	struct ixgbe_queue_stats stats;
 	unsigned long reinit_state;
 	u64 rsc_count;			/* stat for coalesced packets */
+	u64 rsc_flush;			/* stats for flushed packets */
+	u32 restart_queue;		/* track tx queue restarts */
+	u32 non_eop_descs;		/* track hardware descriptor chaining */
 
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
-};
+} ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
 	RING_F_NONE = 0,
@@ -187,7 +191,7 @@ enum ixgbe_ring_f_enum {
 struct ixgbe_ring_feature {
 	int indices;
 	int mask;
-};
+} ____cacheline_internodealigned_in_smp;
 
 #define MAX_RX_QUEUES 128
 #define MAX_TX_QUEUES 128
@@ -273,29 +277,25 @@ struct ixgbe_adapter {
 	u16 eitr_high;
 
 	/* TX */
-	struct ixgbe_ring *tx_ring;	/* One per active queue */
+	struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
 	int num_tx_queues;
-	u64 restart_queue;
-	u64 hw_csum_tx_good;
-	u64 lsc_int;
-	u64 hw_tso_ctxt;
-	u64 hw_tso6_ctxt;
 	u32 tx_timeout_count;
 	bool detect_tx_hung;
 
+	u64 restart_queue;
+	u64 lsc_int;
+
 	/* RX */
-	struct ixgbe_ring *rx_ring;	/* One per active queue */
+	struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
 	int num_rx_queues;
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
-	u64 hw_csum_rx_good;
 	u64 non_eop_descs;
 	int num_msix_vectors;
 	int max_msix_q_vectors;         /* true count of q_vectors for device */
 	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
 	struct msix_entry *msix_entries;
 
-	u64 rx_hdr_split;
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
@@ -340,7 +340,6 @@ struct ixgbe_adapter {
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
-	struct net_device_stats net_stats;
 
 	u32 test_icr;
 	struct ixgbe_ring test_tx_ring;
@@ -376,7 +375,8 @@ struct ixgbe_adapter {
 #ifdef IXGBE_FCOE
 	struct ixgbe_fcoe fcoe;
 #endif /* IXGBE_FCOE */
-	u64 rsc_count;
+	u64 rsc_total_count;
+	u64 rsc_total_flush;
 	u32 wol;
 	u16 eeprom_version;
 };
@@ -397,7 +397,7 @@ enum ixgbe_boards {
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
 #ifdef CONFIG_IXGBE_DCB
-extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern const struct dcbnl_rtnl_ops dcbnl_ops;
 extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
                               struct ixgbe_dcb_config *dst_dcb_cfg,
                               int tc_max);
@@ -458,6 +458,7 @@ extern int ixgbe_fcoe_disable(struct net_device *netdev);
 extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
 extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
 #endif /* IXGBE_FCOE */
 
 #endif /* _IXGBE_H_ */
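
The ixgbe.h changes above move hot per-queue counters (restart_queue, non_eop_descs, rsc_flush) into struct ixgbe_ring and mark the ring structures and ring pointers cache-line aligned. A standalone sketch (C11, 64-byte cache lines assumed, not kernel code) of the false-sharing concern those annotations address: giving each queue's counters its own cache line keeps CPUs that service different queues from bouncing the same line between their caches.

/*
 * Standalone sketch; CACHE_LINE is an assumed value, and the struct is a
 * placeholder rather than the driver's ring structure.
 */
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64	/* assumed cache-line size */

struct queue_stats {
	alignas(CACHE_LINE) uint64_t packets;	/* first member carries the alignment */
	uint64_t bytes;
	uint64_t restart_queue;
};

int main(void)
{
	struct queue_stats q[4];

	/* sizeof() rounds up to 64, so q[0] and q[1] never share a line */
	printf("sizeof(struct queue_stats) = %zu\n", sizeof(q[0]));
	printf("q[0] at %p, q[1] at %p\n", (void *)&q[0], (void *)&q[1]);
	return 0;
}
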
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 34b04924c8a1..72106898a5cb 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -42,6 +42,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed,
                                           bool autoneg,
                                           bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+                                           ixgbe_link_speed speed,
+                                           bool autoneg,
+                                           bool autoneg_wait_to_complete);
 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
                                bool autoneg_wait_to_complete);
 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -64,7 +68,13 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 		/* Set up dual speed SFP+ support */
 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
 	} else {
-		mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+		if ((mac->ops.get_media_type(hw) ==
+		     ixgbe_media_type_backplane) &&
+		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+		     hw->phy.smart_speed == ixgbe_smart_speed_on))
+			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+		else
+			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
 	}
 }
 
@@ -337,6 +347,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 		media_type = ixgbe_media_type_backplane;
 		break;
 	case IXGBE_DEV_ID_82599_SFP:
+	case IXGBE_DEV_ID_82599_SFP_EM:
 		media_type = ixgbe_media_type_fiber;
 		break;
 	case IXGBE_DEV_ID_82599_CX4:
@@ -479,7 +490,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 			hw->mac.autotry_restart = false;
 		}
 
-		/* The controller may take up to 500ms at 10g to acquire link */
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted.  82599 uses the same timing for 10g SFI.
+		 */
+
 		for (i = 0; i < 5; i++) {
 			/* Wait for the link partner to also set speed */
 			msleep(100);
@@ -567,6 +583,111 @@ out:
 }
 
 /**
+ *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Implements the Intel SmartSpeed algorithm.
+ **/
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+				     ixgbe_link_speed speed, bool autoneg,
+				     bool autoneg_wait_to_complete)
+{
+	s32 status = 0;
+	ixgbe_link_speed link_speed;
+	s32 i, j;
+	bool link_up = false;
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
+
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_100_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+	/*
+	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
+	 * autoneg advertisement if link is unable to be established at the
+	 * highest negotiated rate.  This can sometimes happen due to integrity
+	 * issues with the physical media connection.
+	 */
+
+	/* First, try to get link with full advertisement */
+	hw->phy.smart_speed_active = false;
+	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+						    autoneg_wait_to_complete);
+		if (status)
+			goto out;
+
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+		 * Table 9 in the AN MAS.
+		 */
+		for (i = 0; i < 5; i++) {
+			mdelay(100);
+
+			/* If we have link, just jump out */
+			hw->mac.ops.check_link(hw, &link_speed,
+			                       &link_up, false);
+			if (link_up)
+				goto out;
+		}
+	}
+
+	/*
+	 * We didn't get link.  If we advertised KR plus one of KX4/KX
+	 * (or BX4/BX), then disable KR and try again.
+	 */
+	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+		goto out;
+
+	/* Turn SmartSpeed on to disable KR support */
+	hw->phy.smart_speed_active = true;
+	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+					    autoneg_wait_to_complete);
+	if (status)
+		goto out;
+
+	/*
+	 * Wait for the controller to acquire link.  600ms will allow for
+	 * the AN link_fail_inhibit_timer as well for multiple cycles of
+	 * parallel detect, both 10g and 1g. This allows for the maximum
+	 * connect attempts as defined in the AN MAS table 73-7.
+	 */
+	for (i = 0; i < 6; i++) {
+		mdelay(100);
+
+		/* If we have link, just jump out */
+		hw->mac.ops.check_link(hw, &link_speed,
+		                       &link_up, false);
+		if (link_up)
+			goto out;
+	}
+
+	/* We didn't get link.  Turn SmartSpeed back off. */
+	hw->phy.smart_speed_active = false;
+	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+					    autoneg_wait_to_complete);
+
+out:
+	return status;
+}
+
+/**
  *  ixgbe_check_mac_link_82599 - Determine link and speed status
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
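
ixgbe_setup_mac_link_smartspeed() above retries link bring-up with the full advertisement up to IXGBE_SMARTSPEED_MAX_RETRIES (3) times, polling roughly 500 ms per attempt, then masks KR and allows about 600 ms for parallel detect before restoring the original advertisement, on the order of 2 s of polling in the worst case. A simplified, userspace-only sketch of that control flow follows; try_setup_link() and poll_for_link() are stand-ins, not driver calls.

/*
 * Simplified sketch of the retry-then-fall-back shape; timing is implied by
 * the poll counts only (5 and 6 polls of nominally 100 ms each).
 */
#include <stdbool.h>
#include <stdio.h>

#define SMARTSPEED_MAX_RETRIES 3

static bool try_setup_link(bool kr_masked)
{
	/* stand-in for ixgbe_setup_mac_link_82599() */
	printf("setup link, KR %s\n", kr_masked ? "masked" : "advertised");
	return true;	/* pretend the register writes succeeded */
}

static bool poll_for_link(int polls)
{
	/* stand-in for check_link(); each poll is nominally 100 ms */
	for (int i = 0; i < polls; i++)
		;	/* the driver sleeps 100 ms per iteration here */
	return false;	/* pretend link never came up */
}

int main(void)
{
	/* Phase 1: full advertisement, up to ~500 ms of polling per attempt */
	for (int attempt = 0; attempt < SMARTSPEED_MAX_RETRIES; attempt++)
		if (try_setup_link(false) && poll_for_link(5))
			return 0;

	/* Phase 2: mask KR, allow ~600 ms for parallel detect */
	if (try_setup_link(true) && poll_for_link(6))
		return 0;

	/* Give up: restore the full advertisement for the next attempt */
	try_setup_link(false);
	return 1;
}
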
@@ -669,7 +790,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 				autoc |= IXGBE_AUTOC_KX4_SUPP;
-			if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
+			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+			    (hw->phy.smart_speed_active == false))
 				autoc |= IXGBE_AUTOC_KR_SUPP;
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -878,6 +1000,10 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 		hw->mac.num_rar_entries--;
 	}
 
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+	                               &hw->mac.wwpn_prefix);
+
 reset_hw_out:
 	return status;
 }
@@ -2414,6 +2540,51 @@ fw_version_out:
 	return status;
 }
 
+/**
+ *  ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function reads the alternative SAN MAC address block in the EEPROM
+ *  to check for alternative WWNN/WWPN prefix support.
+ **/
+static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                      u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+	                    &alt_san_mac_blk_offset);
+
+	if ((alt_san_mac_blk_offset == 0) ||
+	    (alt_san_mac_blk_offset == 0xFFFF))
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	hw->eeprom.ops.read(hw, offset, &caps);
+	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+	return 0;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
@@ -2425,6 +2596,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
 	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_82599,
 	.get_device_caps        = &ixgbe_get_device_caps_82599,
+	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_82599,
 	.stop_adapter           = &ixgbe_stop_adapter_generic,
 	.get_bus_info           = &ixgbe_get_bus_info_generic,
 	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 40ff120a9ad4..688b8ca5da32 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1382,10 +1382,10 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
 	hw->addr_ctrl.overflow_promisc = 0;
 
 	/* Zero out the other receive addresses */
-	hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
-	for (i = 1; i <= uc_addr_in_use; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
+	for (i = 0; i < uc_addr_in_use; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
 	}
 
 	/* Add the new addresses */
@@ -1755,17 +1755,24 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 	/*
 	 * On backplane, bail out if
 	 * - backplane autoneg was not completed, or if
-	 * - link partner is not AN enabled
+	 * - we are 82599 and link partner is not AN enabled
 	 */
 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
 		links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-		if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
-		    ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
+		if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
 			hw->fc.fc_was_autonegged = false;
 			hw->fc.current_mode = hw->fc.requested_mode;
 			goto out;
 		}
+
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+			if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+				hw->fc.fc_was_autonegged = false;
+				hw->fc.current_mode = hw->fc.requested_mode;
+				goto out;
+			}
+		}
 	}
 
 	/*
@@ -1784,6 +1791,20 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 	}
 
 	/*
+	 * Bail out on
+	 * - copper or CX4 adapters
+	 * - fiber adapters running at 10gig
+	 */
+	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+	     (hw->phy.media_type == ixgbe_media_type_cx4) ||
+	     ((hw->phy.media_type == ixgbe_media_type_fiber) &&
+	     (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+		hw->fc.fc_was_autonegged = false;
+		hw->fc.current_mode = hw->fc.requested_mode;
+		goto out;
+	}
+
+	/*
 	 * Read the AN advertisement and LP ability registers and resolve
 	 * local flow control settings accordingly
 	 */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index a6bc1ef28f92..3c7a79a7d7c6 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -563,7 +563,7 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
 	return rval;
 }
 
-struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops dcbnl_ops = {
 	.getstate	= ixgbe_dcbnl_get_state,
 	.setstate	= ixgbe_dcbnl_set_state,
 	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 856c18c207f3..06a9d18bbdbc 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -40,19 +40,27 @@
 
 #define IXGBE_ALL_RAR_ENTRIES 16
 
+enum {NETDEV_STATS, IXGBE_STATS};
+
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
+	int type;
 	int sizeof_stat;
 	int stat_offset;
 };
 
-#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
-                             offsetof(struct ixgbe_adapter, m)
+#define IXGBE_STAT(m)		IXGBE_STATS, \
+				sizeof(((struct ixgbe_adapter *)0)->m), \
+				offsetof(struct ixgbe_adapter, m)
+#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
+				sizeof(((struct net_device *)0)->m), \
+				offsetof(struct net_device, m)
+
 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
-	{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
-	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
-	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
-	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+	{"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
+	{"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
+	{"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
+	{"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
 	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
 	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
 	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -60,40 +68,36 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"lsc_int", IXGBE_STAT(lsc_int)},
 	{"tx_busy", IXGBE_STAT(tx_busy)},
 	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
-	{"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
-	{"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
-	{"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
-	{"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
-	{"multicast", IXGBE_STAT(net_stats.multicast)},
+	{"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
+	{"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
+	{"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
+	{"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
+	{"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
 	{"broadcast", IXGBE_STAT(stats.bprc)},
 	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
-	{"collisions", IXGBE_STAT(net_stats.collisions)},
-	{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
-	{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
-	{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
-	{"hw_rsc_count", IXGBE_STAT(rsc_count)},
+	{"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
+	{"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
+	{"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
+	{"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
+	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
+	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
 	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
 	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
-	{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
-	{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
-	{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
-	{"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
-	{"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
-	{"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
+	{"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
+	{"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
+	{"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
+	{"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
+	{"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
+	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
 	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
 	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
 	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
 	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
-	{"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
-	{"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
 	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
 	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
 	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
 	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
-	{"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
 	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
-	{"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
-	{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
 	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
 	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
 	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
@@ -196,6 +200,56 @@ static int ixgbe_get_settings(struct net_device *netdev,
 		ecmd->autoneg = AUTONEG_DISABLE;
 	}
 
+	/* Get PHY type */
+	switch (adapter->hw.phy.type) {
+	case ixgbe_phy_tn:
+	case ixgbe_phy_cu_unknown:
+		/* Copper 10G-BASET */
+		ecmd->port = PORT_TP;
+		break;
+	case ixgbe_phy_qt:
+		ecmd->port = PORT_FIBRE;
+		break;
+	case ixgbe_phy_nl:
+	case ixgbe_phy_tw_tyco:
+	case ixgbe_phy_tw_unknown:
+	case ixgbe_phy_sfp_ftl:
+	case ixgbe_phy_sfp_avago:
+	case ixgbe_phy_sfp_intel:
+	case ixgbe_phy_sfp_unknown:
+		switch (adapter->hw.phy.sfp_type) {
+		/* SFP+ devices, further checking needed */
+		case ixgbe_sfp_type_da_cu:
+		case ixgbe_sfp_type_da_cu_core0:
+		case ixgbe_sfp_type_da_cu_core1:
+			ecmd->port = PORT_DA;
+			break;
+		case ixgbe_sfp_type_sr:
+		case ixgbe_sfp_type_lr:
+		case ixgbe_sfp_type_srlr_core0:
+		case ixgbe_sfp_type_srlr_core1:
+			ecmd->port = PORT_FIBRE;
+			break;
+		case ixgbe_sfp_type_not_present:
+			ecmd->port = PORT_NONE;
+			break;
+		case ixgbe_sfp_type_unknown:
+		default:
+			ecmd->port = PORT_OTHER;
+			break;
+		}
+		break;
+	case ixgbe_phy_xaui:
+		ecmd->port = PORT_NONE;
+		break;
+	case ixgbe_phy_unknown:
+	case ixgbe_phy_generic:
+	case ixgbe_phy_sfp_unsupported:
+	default:
+		ecmd->port = PORT_OTHER;
+		break;
+	}
+
 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 	if (link_up) {
 		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
@@ -933,10 +987,21 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
 	int j, k;
 	int i;
+	char *p = NULL;
 
 	ixgbe_update_stats(adapter);
 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
+		switch (ixgbe_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *) netdev +
+					ixgbe_gstrings_stats[i].stat_offset;
+			break;
+		case IXGBE_STATS:
+			p = (char *) adapter +
+					ixgbe_gstrings_stats[i].stat_offset;
+			break;
+		}
+
 		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
 		           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
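
The ethtool changes above tag every entry of ixgbe_gstrings_stats with either NETDEV_STATS or IXGBE_STATS so a single offsetof-based table can resolve statistics against struct net_device or struct ixgbe_adapter, whichever structure the offset was taken from. A standalone sketch of that tagged-table pattern follows; the struct and field names are placeholders, not the driver's.

/*
 * Standalone sketch of the tagged offsetof stat table: each entry records
 * which base structure its offset is relative to, and the dump loop picks
 * the matching base pointer.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { NETDEV_STATS, ADAPTER_STATS };

struct fake_netdev  { uint64_t rx_packets, tx_packets; };
struct fake_adapter { uint64_t restart_queue, lsc_int; };

struct stat_entry {
	const char *name;
	int type;
	size_t offset;
};

#define NETDEV_STAT(m)  NETDEV_STATS,  offsetof(struct fake_netdev, m)
#define ADAPTER_STAT(m) ADAPTER_STATS, offsetof(struct fake_adapter, m)

static const struct stat_entry stats[] = {
	{"rx_packets",    NETDEV_STAT(rx_packets)},
	{"tx_packets",    NETDEV_STAT(tx_packets)},
	{"restart_queue", ADAPTER_STAT(restart_queue)},
	{"lsc_int",       ADAPTER_STAT(lsc_int)},
};

int main(void)
{
	struct fake_netdev  nd = { .rx_packets = 10, .tx_packets = 20 };
	struct fake_adapter ad = { .restart_queue = 3, .lsc_int = 7 };

	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		const char *base = (stats[i].type == NETDEV_STATS) ?
				   (const char *)&nd : (const char *)&ad;
		printf("%-14s = %llu\n", stats[i].name,
		       (unsigned long long)*(const uint64_t *)(base + stats[i].offset));
	}
	return 0;
}
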
@@ -1255,15 +1320,15 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		return 0;
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		shared_int = false;
-		if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
 				netdev)) {
 			*data = 1;
 			return -1;
 		}
-	} else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
 	                        netdev->name, netdev)) {
 		shared_int = false;
-	} else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
 	                       netdev->name, netdev)) {
 		*data = 1;
 		return -1;
@@ -1952,6 +2017,10 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 		break;
 	}
 
+	/* if in mixed tx/rx queues per vector mode, report only rx settings */
+	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+		return 0;
+
 	/* only valid if in constant ITR mode */
 	switch (adapter->tx_itr_setting) {
 	case 0:
@@ -1977,12 +2046,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	struct ixgbe_q_vector *q_vector;
 	int i;
 
-	/*
-	 * don't accept tx specific changes if we've got mixed RxTx vectors
-	 * test and jump out here if needed before changing the rx numbers
-	 */
-	if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
-	    adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+	/* don't accept tx specific changes if we've got mixed RxTx vectors */
+	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count &&
+	    ec->tx_coalesce_usecs)
 		return -EINVAL;
 
 	if (ec->tx_max_coalesced_frames_irq)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99515e2..da32a108a7b4 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -499,6 +499,10 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+#ifdef CONFIG_IXGBE_DCB
+	u8 tc;
+	u32 up2tc;
+#endif
 
 	/* create the pool for ddp if not created yet */
 	if (!fcoe->pool) {
@@ -540,6 +544,17 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			IXGBE_FCRXCTRL_FCOELLI |
 			IXGBE_FCRXCTRL_FCCRCBO |
 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
+#ifdef CONFIG_IXGBE_DCB
+	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
+	for (i = 0; i < MAX_USER_PRIORITY; i++) {
+		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
+		tc &= (MAX_TRAFFIC_CLASS - 1);
+		if (fcoe->tc == tc) {
+			fcoe->up = i;
+			break;
+		}
+	}
+#endif
 }
 
 /**
@@ -671,19 +686,7 @@ out_disable:
  */
 u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
 {
-	int i;
-	u8 tc;
-	u32 up2tc;
-
-	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-	for (i = 0; i < MAX_USER_PRIORITY; i++) {
-		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
-		tc &= (MAX_TRAFFIC_CLASS - 1);
-		if (adapter->fcoe.tc == tc)
-			return 1 << i;
-	}
-
-	return 0;
+	return 1 << adapter->fcoe.up;
 }
 
 /**
@@ -710,6 +713,7 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
 				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
 				up2tc &= (MAX_TRAFFIC_CLASS - 1);
 				adapter->fcoe.tc = (u8)up2tc;
+				adapter->fcoe.up = i;
 				return 0;
 			}
 		}
@@ -718,3 +722,49 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
 	return 1;
 }
 #endif /* CONFIG_IXGBE_DCB */
+
+/**
+ * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
+ * @netdev: ixgbe adapter
+ * @wwn: the world wide name
+ * @type: the type of world wide name
+ *
+ * Returns the node or port world wide name if both the prefix and the SAN
+ * MAC address are valid.  The WWN is formed based on the NAA-2 IEEE
+ * Extended name identifier format (ref. T10 FC-LS Spec., Sec. 15.3).
+ *
+ * Returns: 0 on success
+ */
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+	int rc = -EINVAL;
+	u16 prefix = 0xffff;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+	switch (type) {
+	case NETDEV_FCOE_WWNN:
+		prefix = mac->wwnn_prefix;
+		break;
+	case NETDEV_FCOE_WWPN:
+		prefix = mac->wwpn_prefix;
+		break;
+	default:
+		break;
+	}
+
+	if ((prefix != 0xffff) &&
+	    is_valid_ether_addr(mac->san_addr)) {
+		*wwn = ((u64) prefix << 48) |
+		       ((u64) mac->san_addr[0] << 40) |
+		       ((u64) mac->san_addr[1] << 32) |
+		       ((u64) mac->san_addr[2] << 24) |
+		       ((u64) mac->san_addr[3] << 16) |
+		       ((u64) mac->san_addr[4] << 8)  |
+		       ((u64) mac->san_addr[5]);
+		rc = 0;
+	}
+	return rc;
+}
+
+
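
ixgbe_fcoe_get_wwn() above builds a 64-bit WWN by placing the 16-bit EEPROM prefix in the top two bytes and the 6-byte SAN MAC address below it. A standalone illustration of that composition follows; the prefix and MAC values used are made up for the example, not real addresses.

/*
 * Standalone illustration of composing a 64-bit WWN from a 16-bit prefix
 * and a 6-byte MAC, mirroring the shift/or chain in ixgbe_fcoe_get_wwn().
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t compose_wwn(uint16_t prefix, const uint8_t mac[6])
{
	uint64_t wwn = (uint64_t)prefix << 48;

	for (int i = 0; i < 6; i++)
		wwn |= (uint64_t)mac[i] << (8 * (5 - i));
	return wwn;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/* e.g. prefix 0x2000 -> WWN 0x2000001b21aabbcc */
	printf("wwn = 0x%016llx\n",
	       (unsigned long long)compose_wwn(0x2000, mac));
	return 0;
}
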
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index b5dee7b3ef1c..de8ff53187da 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -62,7 +62,10 @@ struct ixgbe_fcoe_ddp {
 };
 
 struct ixgbe_fcoe {
+#ifdef CONFIG_IXGBE_DCB
 	u8 tc;
+	u8 up;
+#endif
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a456578b8578..247ed2a24769 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -98,6 +98,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
 	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
@@ -216,10 +218,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
 {
-	tx_buffer_info->dma = 0;
+	if (tx_buffer_info->dma) {
+		if (tx_buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev,
+				       tx_buffer_info->dma,
+				       tx_buffer_info->length,
+				       PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev,
+					 tx_buffer_info->dma,
+					 tx_buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		tx_buffer_info->dma = 0;
+	}
 	if (tx_buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
-		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_buffer_info->skb);
 		tx_buffer_info->skb = NULL;
 	}
@@ -401,7 +413,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++adapter->restart_queue;
+			++tx_ring->restart_queue;
 		}
 	}
 
@@ -423,8 +435,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	tx_ring->total_packets += total_packets;
 	tx_ring->stats.packets += total_packets;
 	tx_ring->stats.bytes += total_bytes;
-	adapter->net_stats.tx_bytes += total_bytes;
-	adapter->net_stats.tx_packets += total_packets;
+	netdev->stats.tx_bytes += total_bytes;
+	netdev->stats.tx_packets += total_packets;
 	return (count < tx_ring->work_limit);
 }
 
@@ -612,7 +624,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 
 	/* It must be a TCP or UDP packet with a valid checksum */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	adapter->hw_csum_rx_good++;
 }
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -669,21 +680,18 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
 		if (!bi->skb) {
 			struct sk_buff *skb;
-			skb = netdev_alloc_skb(adapter->netdev,
-			                       (rx_ring->rx_buf_len +
-			                        NET_IP_ALIGN));
+			/* netdev_alloc_skb reserves 32 bytes up front!! */
+			uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
+			skb = netdev_alloc_skb(adapter->netdev, bufsz);
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/*
-			 * Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
+			/* advance the data pointer to the next cache line */
+			skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+			                  - skb->data));
 
 			bi->skb = skb;
 			bi->dma = pci_map_single(pdev, skb->data,
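
The allocation path above now over-allocates by SMP_CACHE_BYTES and uses PTR_ALIGN() to push skb->data up to the next cache-line boundary, replacing the old 2-byte NET_IP_ALIGN reserve. A standalone sketch of that round-up arithmetic follows, with a 64-byte line size assumed.

/*
 * Standalone sketch: headroom = aligned(data) - data, where aligned()
 * rounds a pointer up to the next SMP_CACHE_BYTES boundary (power of two).
 */
#include <stdint.h>
#include <stdio.h>

#define SMP_CACHE_BYTES 64	/* assumed cache-line size */

static uintptr_t ptr_align(uintptr_t p, uintptr_t a)
{
	return (p + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	uintptr_t data = 0x1000 + 32;	/* e.g. 32 bytes already consumed */
	uintptr_t headroom = ptr_align(data, SMP_CACHE_BYTES) - data;

	printf("data=0x%lx headroom=%lu new data=0x%lx\n",
	       (unsigned long)data, (unsigned long)headroom,
	       (unsigned long)(data + headroom));	/* 0x1040 */
	return 0;
}
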
@@ -735,12 +743,14 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
+ * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
+                                                        u64 *count)
 {
 	unsigned int frag_list_size = 0;
 
@@ -749,6 +759,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 		frag_list_size += skb->len;
 		skb->prev = NULL;
 		skb = prev;
+		*count += 1;
 	}
 
 	skb_shinfo(skb)->frag_list = skb->next;
@@ -764,6 +775,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                int *work_done, int work_to_do)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -793,8 +805,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-			if (hdr_info & IXGBE_RXDADV_SPH)
-				adapter->rx_hdr_split++;
 			if (len > IXGBE_RX_HDR_SIZE)
 				len = IXGBE_RX_HDR_SIZE;
 			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -804,7 +814,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 		cleaned = true;
 		skb = rx_buffer_info->skb;
-		prefetch(skb->data - NET_IP_ALIGN);
+		prefetch(skb->data);
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
@@ -850,14 +860,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
 				     IXGBE_RXDADV_NEXTP_SHIFT;
 			next_buffer = &rx_ring->rx_buffer_info[nextp];
-			rx_ring->rsc_count += (rsc_count - 1);
 		} else {
 			next_buffer = &rx_ring->rx_buffer_info[i];
 		}
 
 		if (staterr & IXGBE_RXD_STAT_EOP) {
 			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb);
+				skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+					rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+				else
+					rx_ring->rsc_count++;
+				rx_ring->rsc_flush++;
+			}
 			rx_ring->stats.packets++;
 			rx_ring->stats.bytes += skb->len;
 		} else {
@@ -870,7 +886,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				skb->next = next_buffer->skb;
 				skb->next->prev = skb;
 			}
-			adapter->non_eop_descs++;
+			rx_ring->non_eop_descs++;
 			goto next_desc;
 		}
 
@@ -935,8 +951,8 @@ next_desc:
 
 	rx_ring->total_packets += total_rx_packets;
 	rx_ring->total_bytes += total_rx_bytes;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 
 	return cleaned;
 }
@@ -1209,6 +1225,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 	adapter->link_check_timeout = jiffies;
 	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+		IXGBE_WRITE_FLUSH(hw);
 		schedule_work(&adapter->watchdog_task);
 	}
 }
@@ -1312,8 +1329,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 		                      r_idx + 1);
 	}
 
-	/* disable interrupts on this vector only */
-	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	/* EIAM disabled interrupts (on this vector) for us */
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -1344,10 +1360,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 	if (!q_vector->rxr_count)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
 	/* disable interrupts on this vector only */
-	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	/* EIAM disabled interrupts (on this vector) for us */
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -1382,8 +1396,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 		                      r_idx + 1);
 	}
 
-	/* disable interrupts on this vector only */
-	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	/* EIAM disabled interrupts (on this vector) for us */
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -1667,7 +1680,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+	                  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
 		DPRINTK(PROBE, ERR,
 			"request_irq for msix_lsc failed: %d\n", err);
@@ -1838,10 +1851,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
 		                  netdev->name, netdev);
 	} else {
-		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
 		                  netdev->name, netdev);
 	}
 
@@ -2063,18 +2076,18 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter:    address of board private structure
  * @index:      index of ring to set
- * @rx_buf_len: rx buffer length
  **/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
-                                   int rx_buf_len)
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
 {
 	struct ixgbe_ring *rx_ring;
 	struct ixgbe_hw *hw = &adapter->hw;
 	int j;
 	u32 rscctrl;
+	int rx_buf_len;
 
 	rx_ring = &adapter->rx_ring[index];
 	j = rx_ring->reg_idx;
+	rx_buf_len = rx_ring->rx_buf_len;
 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
 	rscctrl |= IXGBE_RSCCTL_RSCEN;
 	/*
@@ -2282,7 +2295,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 		/* Enable 82599 HW-RSC */
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			ixgbe_configure_rscctl(adapter, i, rx_buf_len);
+			ixgbe_configure_rscctl(adapter, i);
 
 		/* Disable RSC for ACK packets */
 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
@@ -2333,23 +2346,25 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 	 * not in DCB mode.
 	 */
 	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+
+	/* Disable CFI check */
+	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+
+	/* enable VLAN tag stripping */
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
-		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+		ctrl |= IXGBE_VLNCTRL_VME;
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		ctrl |= IXGBE_VLNCTRL_VFE;
-		/* enable VLAN tag insert/strip */
-		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
 		for (i = 0; i < adapter->num_rx_queues; i++) {
+			u32 ctrl;
 			j = adapter->rx_ring[i].reg_idx;
 			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
 			ctrl |= IXGBE_RXDCTL_VME;
 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
 		}
 	}
+
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+
 	ixgbe_vlan_rx_add_vid(netdev, 0);
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2699,7 +2714,22 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 	}
 
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		/*
+		 * use EIAM to auto-mask when MSI-X interrupt is asserted
+		 * this saves a register write for every interrupt
+		 */
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+			break;
+		default:
+		case ixgbe_mac_82599EB:
+			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+			break;
+		}
+	} else {
 		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
 		 * specifically only auto mask tx and rx interrupts */
 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
@@ -3632,10 +3662,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	 * It's easy to be greedy for MSI-X vectors, but it really
 	 * doesn't do us much good if we have a lot more vectors
 	 * than CPU's.  So let's be conservative and only ask for
-	 * (roughly) twice the number of vectors as there are CPU's.
+	 * (roughly) the same number of vectors as there are CPU's.
 	 */
 	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+	               (int)num_online_cpus()) + NON_Q_VECTORS;
 
 	/*
 	 * At the same time, hardware can only support a maximum of
@@ -3943,8 +3973,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
 		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 		adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
 		/* Default traffic class to use for FCoE */
 		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
 #endif /* IXGBE_FCOE */
 	}
 
@@ -4475,20 +4507,32 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
  **/
 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u64 total_mpc = 0;
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 
-	if (hw->mac.type == ixgbe_mac_82599EB) {
+	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 		u64 rsc_count = 0;
+		u64 rsc_flush = 0;
 		for (i = 0; i < 16; i++)
 			adapter->hw_rx_no_dma_resources +=
 			                     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-		for (i = 0; i < adapter->num_rx_queues; i++)
+		for (i = 0; i < adapter->num_rx_queues; i++) {
 			rsc_count += adapter->rx_ring[i].rsc_count;
-		adapter->rsc_count = rsc_count;
+			rsc_flush += adapter->rx_ring[i].rsc_flush;
+		}
+		adapter->rsc_total_count = rsc_count;
+		adapter->rsc_total_flush = rsc_flush;
 	}
 
+	/* gather per-queue stats into the adapter struct */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+
 	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 	for (i = 0; i < 8; i++) {
 		/* for packet buffers not used, the register should read 0 */
@@ -4594,15 +4638,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 
 	/* Fill out the OS statistics structure */
-	adapter->net_stats.multicast = adapter->stats.mprc;
+	netdev->stats.multicast = adapter->stats.mprc;
 
 	/* Rx Errors */
-	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
+	netdev->stats.rx_errors = adapter->stats.crcerrs +
 	                               adapter->stats.rlec;
-	adapter->net_stats.rx_dropped = 0;
-	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
-	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_missed_errors = total_mpc;
+	netdev->stats.rx_dropped = 0;
+	netdev->stats.rx_length_errors = adapter->stats.rlec;
+	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+	netdev->stats.rx_missed_errors = total_mpc;
 }
 
 /**
@@ -4871,14 +4915,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 			                                         iph->daddr, 0,
 			                                         IPPROTO_TCP,
 			                                         0);
-			adapter->hw_tso_ctxt++;
 		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
 			ipv6_hdr(skb)->payload_len = 0;
 			tcp_hdr(skb)->check =
 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 			                     &ipv6_hdr(skb)->daddr,
 			                     0, IPPROTO_TCP, 0);
-			adapter->hw_tso6_ctxt++;
 		}
 
 		i = tx_ring->next_to_use;
@@ -4997,7 +5039,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
-		adapter->hw_csum_tx_good++;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
@@ -5014,23 +5055,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct sk_buff *skb, u32 tx_flags,
                         unsigned int first)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
 		/* excluding fcoe_crc_eof for FCoE */
 		total -= sizeof(struct fcoe_crc_eof);
@@ -5041,7 +5075,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 		tx_buffer_info->length = size;
-		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+		tx_buffer_info->mapped_as_page = false;
+		tx_buffer_info->dma = pci_map_single(pdev,
+						     skb->data + offset,
+						     size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
@@ -5062,7 +5101,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = min((unsigned int)frag->size, total);
-		offset = 0;
+		offset = frag->page_offset;
 
 		while (len) {
 			i++;
@@ -5073,7 +5112,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = map[f] + offset;
+			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+							   frag->page,
+							   offset, size,
+							   PCI_DMA_TODEVICE);
+			tx_buffer_info->mapped_as_page = true;
+			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
 
@@ -5090,6 +5135,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
 
 	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed tx_buffer_info map */
+	tx_buffer_info->dma = 0;
+	tx_buffer_info->time_stamp = 0;
+	tx_buffer_info->next_to_watch = 0;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+	}
+
+	return 0;
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
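
ixgbe_tx_map() above now maps the linear part with pci_map_single() and each fragment with pci_map_page(), records how each buffer was mapped, and on a mapping failure walks backwards through tx_buffer_info unmapping only what actually succeeded. A simplified sketch of that map-then-unwind shape follows; map_seg() and unmap_seg() are stand-ins, not the PCI DMA API.

/*
 * Simplified sketch: remember how each segment was mapped so the error
 * path can undo exactly the mappings that succeeded, in reverse order.
 */
#include <stdbool.h>
#include <stdio.h>

#define NSEG 4

struct seg { bool mapped; bool as_page; };

static bool map_seg(struct seg *s, int idx, bool as_page)
{
	s->as_page = as_page;
	s->mapped = (idx != 2);		/* pretend segment 2 fails to map */
	return s->mapped;
}

static void unmap_seg(struct seg *s)
{
	printf("unmap %s segment\n", s->as_page ? "page" : "single");
	s->mapped = false;
}

int main(void)
{
	struct seg segs[NSEG] = {{ false, false }};
	int i;

	for (i = 0; i < NSEG; i++)
		if (!map_seg(&segs[i], i, i > 0))	/* 0 = linear part, rest = frags */
			goto dma_error;
	return 0;

dma_error:
	while (--i >= 0)	/* walk back over the mappings that succeeded */
		unmap_seg(&segs[i]);
	return 1;
}
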
@@ -5209,8 +5275,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
@@ -5224,7 +5288,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
 	netif_start_subqueue(netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	++tx_ring->restart_queue;
 	return 0;
 }
 
@@ -5239,10 +5303,19 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int txq = smp_processor_id();
 
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-		return smp_processor_id();
+		return txq;
 
+#ifdef IXGBE_FCOE
+	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+	    (skb->protocol == htons(ETH_P_FCOE))) {
+		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+		txq += adapter->ring_feature[RING_F_FCOE].mask;
+		return txq;
+	}
+#endif
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 		return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
 
@@ -5257,7 +5330,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
-	int r_idx = 0, tso;
+	int tso;
 	int count = 0;
 	unsigned int f;
 
@@ -5265,13 +5338,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		tx_flags |= vlan_tx_tag_get(skb);
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-			tx_flags |= (skb->queue_mapping << 13);
+			tx_flags |= ((skb->queue_mapping & 0x7) << 13);
 		}
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 		if (skb->priority != TC_PRIO_CONTROL) {
-			tx_flags |= (skb->queue_mapping << 13);
+			tx_flags |= ((skb->queue_mapping & 0x7) << 13);
 			tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 			tx_flags |= IXGBE_TX_FLAGS_VLAN;
 		} else {
@@ -5280,17 +5353,18 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		}
 	}
 
-	r_idx = skb->queue_mapping;
-	tx_ring = &adapter->tx_ring[r_idx];
+	tx_ring = &adapter->tx_ring[skb->queue_mapping];
 
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
 	    (skb->protocol == htons(ETH_P_FCOE))) {
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #ifdef IXGBE_FCOE
-		r_idx = smp_processor_id();
-		r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-		r_idx += adapter->ring_feature[RING_F_FCOE].mask;
-		tx_ring = &adapter->tx_ring[r_idx];
+#ifdef CONFIG_IXGBE_DCB
+		tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		tx_flags |= ((adapter->fcoe.up << 13)
+			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
 #endif
 	}
 	/* four things can cause us to need a context descriptor */
@@ -5372,10 +5446,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
  **/
 static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
 {
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
 	/* only return the current stats */
-	return &adapter->net_stats;
+	return &netdev->stats;
 }
 
 /**
@@ -5527,6 +5599,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
 	.ndo_fcoe_enable = ixgbe_fcoe_enable,
 	.ndo_fcoe_disable = ixgbe_fcoe_disable,
+	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
 #endif /* IXGBE_FCOE */
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index ef4bdd58e016..21b6633da578 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -52,6 +52,7 @@
 #define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514
 #define IXGBE_DEV_ID_82599_CX4           0x10F9
 #define IXGBE_DEV_ID_82599_SFP           0x10FB
+#define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
 #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
 
@@ -1538,6 +1539,16 @@
 #define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
 #define IXGBE_FW_PATCH_VERSION_4   0x7
 
+/* Alternative SAN MAC Address Block */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR      0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET  0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET  0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET  0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */
+
 /* PCI Bus Info */
 #define IXGBE_PCI_LINK_STATUS     0xB2
 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2171,6 +2182,14 @@ enum ixgbe_fc_mode {
 	ixgbe_fc_default
 };
 
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES	3
+enum ixgbe_smart_speed {
+	ixgbe_smart_speed_auto = 0,
+	ixgbe_smart_speed_on,
+	ixgbe_smart_speed_off
+};
+
 /* PCI bus types */
 enum ixgbe_bus_type {
 	ixgbe_bus_type_unknown = 0,
@@ -2336,6 +2355,7 @@ struct ixgbe_mac_operations {
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+	s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
 	void (*set_lan_id)(struct ixgbe_hw *);
@@ -2407,6 +2427,10 @@ struct ixgbe_mac_info {
 	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16                             wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16                             wwpn_prefix;
 	s32                             mc_filter_type;
 	u32                             mcft_size;
 	u32                             vft_size;
@@ -2431,6 +2455,8 @@ struct ixgbe_phy_info {
 	enum ixgbe_media_type           media_type;
 	bool                            reset_disable;
 	ixgbe_autoneg_advertised        autoneg_advertised;
+	enum ixgbe_smart_speed          smart_speed;
+	bool                            smart_speed_active;
 	bool                            multispeed_fiber;
 };
 
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 9aee0cc922c9..e9d9d595e1b7 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -109,9 +109,8 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
 		if (unlikely(!netif_running(nds[desc->channel])))
 			goto err;
 
-		skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
+		skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
 		if (likely(skb != NULL)) {
-			skb_reserve(skb, 2);
 			skb_copy_to_linear_data(skb, buf, desc->pkt_length);
 			skb_put(skb, desc->pkt_length);
 			skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 6e5b3f30527f..f47d4d663b19 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -81,7 +81,7 @@ static unsigned short known_revisions[] =
 
 static int jazzsonic_open(struct net_device* dev)
 {
-	if (request_irq(dev->irq, &sonic_interrupt, IRQF_DISABLED, "sonic", dev)) {
+	if (request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, "sonic", dev)) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
@@ -130,8 +130,8 @@ static int __devinit sonic_probe1(struct net_device *dev)
 		printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
 
 	i = 0;
-	while (known_revisions[i] != 0xffff
-	       && known_revisions[i] != silicon_revision)
+	while (known_revisions[i] != 0xffff &&
+	       known_revisions[i] != silicon_revision)
 		i++;
 
 	if (known_revisions[i] == 0xffff) {
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 1d2a32544ed2..792b88fc3574 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1050,8 +1050,8 @@ jme_dynamic_pcc(struct jme_adapter *jme)
 
 	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
 		jme_attempt_pcc(dpi, PCC_P3);
-	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
-	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
+	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
+		 dpi->intr_cnt > PCC_INTR_THRESHOLD)
 		jme_attempt_pcc(dpi, PCC_P2);
 	else
 		jme_attempt_pcc(dpi, PCC_P1);
@@ -2199,8 +2199,8 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 	if (netif_running(netdev))
 		return -EBUSY;
 
-	if (ecmd->use_adaptive_rx_coalesce
-	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
+	if (ecmd->use_adaptive_rx_coalesce &&
+	    test_bit(JME_FLAG_POLL, &jme->flags)) {
 		clear_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_rx;
 		jme->jme_vlan_rx = vlan_hwaccel_rx;
@@ -2209,8 +2209,8 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 		dpi->cnt		= 0;
 		jme_set_rx_pcc(jme, PCC_P1);
 		jme_interrupt_mode(jme);
-	} else if (!(ecmd->use_adaptive_rx_coalesce)
-	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
+	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
+		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
 		set_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_receive_skb;
 		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
@@ -2764,19 +2764,19 @@ jme_init_one(struct pci_dev *pdev,
 	atomic_set(&jme->rx_empty, 1);
 
 	tasklet_init(&jme->pcc_task,
-		     &jme_pcc_tasklet,
+		     jme_pcc_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->linkch_task,
-		     &jme_link_change_tasklet,
+		     jme_link_change_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->txclean_task,
-		     &jme_tx_clean_tasklet,
+		     jme_tx_clean_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->rxclean_task,
-		     &jme_rx_clean_tasklet,
+		     jme_rx_clean_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->rxempty_task,
-		     &jme_rx_empty_tasklet,
+		     jme_rx_empty_tasklet,
 		     (unsigned long) jme);
 	tasklet_disable_nosync(&jme->linkch_task);
 	tasklet_disable_nosync(&jme->txclean_task);
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 03199fa10003..25e2af6997e4 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -400,7 +400,7 @@ static int korina_rx(struct net_device *dev, int limit)
 			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 
 			/* Malloc up new buffer. */
-			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 
 			if (!skb_new)
 				break;
@@ -417,9 +417,6 @@ static int korina_rx(struct net_device *dev, int limit)
 			if (devcs & ETH_RX_MP)
 				dev->stats.multicast++;
 
-			/* 16 bit align */
-			skb_reserve(skb_new, 2);
-
 			lp->rx_skb[lp->rx_next_done] = skb_new;
 		}
 
@@ -1017,14 +1014,14 @@ static int korina_open(struct net_device *dev)
 	/* Install the interrupt handler
 	 * that handles the Done Finished
 	 * Ovr and Und Events */
-	ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
+	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
 			IRQF_DISABLED, "Korina ethernet Rx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
 		    dev->name, lp->rx_irq);
 		goto err_release;
 	}
-	ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
+	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
 			IRQF_DISABLED, "Korina ethernet Tx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
@@ -1033,7 +1030,7 @@ static int korina_open(struct net_device *dev)
 	}
 
 	/* Install handler for overrun error. */
-	ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
+	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
 			IRQF_DISABLED, "Ethernet Overflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
@@ -1042,7 +1039,7 @@ static int korina_open(struct net_device *dev)
 	}
 
 	/* Install handler for underflow error. */
-	ret = request_irq(lp->und_irq, &korina_und_interrupt,
+	ret = request_irq(lp->und_irq, korina_und_interrupt,
 			IRQF_DISABLED, "Ethernet Underflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
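Most of the request_irq() changes in this series (korina above, and lance, lib82596, lp486e, mac89x0, macsonic, mipsnet, myri_sbus, natsemi and netx-eth further down) only drop a redundant '&' in front of the handler: a function designator already decays to a pointer, so both spellings hand request_irq() the same irq_handler_t value. A minimal sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	/* hypothetical handler; a real one would service the device */
	return IRQ_HANDLED;
}

static int demo_open(struct net_device *dev)
{
	/* request_irq(dev->irq, &demo_interrupt, ...) would pass the
	 * exact same pointer value; the '&' is just noise */
	return request_irq(dev->irq, demo_interrupt, IRQF_SHARED,
			   "demo", dev);
}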
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 99e954167fa6..5c45cb58d023 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -357,7 +357,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
 
 	/* check the status */
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-		struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
+		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
 		dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
 			__func__, len);
@@ -369,9 +369,6 @@ static void ks8842_rx_frame(struct net_device *netdev,
 			if (status & RXSR_MULTICAST)
 				netdev->stats.multicast++;
 
-			/* Align socket buffer in 4-byte boundary for
-				 better performance. */
-			skb_reserve(skb, 2);
 			data = (u32 *)skb_put(skb, len);
 
 			ks8842_select_bank(adapter, 17);
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index a23f739d222f..6d3ac65bc35c 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1,4 +1,4 @@
-/* drivers/net/ks8651.c
+/* drivers/net/ks8851.c
  *
  * Copyright 2009 Simtec Electronics
  *	http://www.simtec.co.uk/
@@ -714,7 +714,7 @@ static void ks8851_tx_work(struct work_struct *work)
 {
 	struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
 	struct sk_buff *txb;
-	bool last = false;
+	bool last = skb_queue_empty(&ks->txq);
 
 	mutex_lock(&ks->lock);
 
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index dcda30338b65..8d7d3d4625f6 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -493,14 +493,14 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
 		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
 		int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
 		/* We can have boards other than the built-in!  Verify this is on-board. */
-		if ((inb(hp_port) & 0xc0) == 0x80
-			&& ioaddr_table[inb(hp_port) & 3] == ioaddr)
+		if ((inb(hp_port) & 0xc0) == 0x80 &&
+		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
 			hp_builtin = hp_port;
 	}
 	iounmap(bios);
 	/* We also recognize the HP Vectra on-board here, but check below. */
-	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
-				&& inb(ioaddr+2) == 0x09);
+	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
+		    inb(ioaddr+2) == 0x09);
 
 	/* Reset the LANCE.	 */
 	reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
@@ -755,7 +755,7 @@ lance_open(struct net_device *dev)
 	int i;
 
 	if (dev->irq == 0 ||
-		request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
+		request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
 		return -EAGAIN;
 	}
 
@@ -1035,8 +1035,8 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
 	spin_lock (&lp->devlock);
 
 	outw(0x00, dev->base_addr + LANCE_ADDR);
-	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
-		   && --boguscnt >= 0) {
+	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
+	       --boguscnt >= 0) {
 		/* Acknowledge all of the current interrupt sources ASAP. */
 		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
 
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 51e11c3e53e1..7a07430206e3 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev)
 
 	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 		dma_addr_t dma_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+		struct sk_buff *skb;
 
+		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 		if (skb == NULL)
 			return -1;
-		skb_reserve(skb, 2);
 		dma_addr = dma_map_single(dev->dev.parent, skb->data,
 					  PKT_BUF_SZ, DMA_FROM_DEVICE);
 		rbd->v_next = rbd+1;
@@ -588,7 +588,7 @@ static int init_i596_mem(struct net_device *dev)
 			     "%s: i82596 initialization successful\n",
 			     dev->name));
 
-	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
+	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
 		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
 		goto failed;
 	}
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev)
 						 (dma_addr_t)SWAP32(rbd->b_data),
 						 PKT_BUF_SZ, DMA_FROM_DEVICE);
 				/* Get fresh skbuff to replace filled one. */
-				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+				newskb = netdev_alloc_skb_ip_align(dev,
+								   PKT_BUF_SZ);
 				if (newskb == NULL) {
 					skb = NULL;	/* drop pkt */
 					goto memory_squeeze;
 				}
-				skb_reserve(newskb, 2);
 
 				/* Pass up the skb already on the Rx ring. */
 				skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev)
 				rbd->b_data = SWAP32(dma_addr);
 				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 			} else
-				skb = netdev_alloc_skb(dev, pkt_len + 2);
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 memory_squeeze:
 			if (skb == NULL) {
 				/* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@ memory_squeeze:
 					dma_sync_single_for_cpu(dev->dev.parent,
 								(dma_addr_t)SWAP32(rbd->b_data),
 								PKT_BUF_SZ, DMA_FROM_DEVICE);
-					skb_reserve(skb, 2);
 					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
 					dma_sync_single_for_device(dev->dev.parent,
 								   (dma_addr_t)SWAP32(rbd->b_data),
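The RX-buffer hunks in korina, ks8842 and lib82596 above all collapse the open-coded netdev_alloc_skb(dev, len + 2) plus skb_reserve(skb, 2) pair into netdev_alloc_skb_ip_align(). Behaviourally the helper amounts to the sketch below (hypothetical name, assuming the usual 2-byte NET_IP_ALIGN pad, which leaves the IP header 4-byte aligned behind the 14-byte Ethernet header):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* sketch of what the helper does; the in-tree one lives in <linux/skbuff.h> */
static inline struct sk_buff *rx_alloc_aligned(struct net_device *dev,
					       unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}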
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 256119882b1e..57f25848fe80 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -464,8 +464,8 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
 			   ei_inb_p(e8390_base + EN0_ISR));
 
 	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
-	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0
-		   && ++nr_serviced < MAX_SERVICE)
+	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
+	       ++nr_serviced < MAX_SERVICE)
 	{
 		if (!netif_running(dev)) {
 			printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
@@ -721,10 +721,10 @@ static void ei_receive(struct net_device *dev)
 		/* Check for bogosity warned by 3c503 book: the status byte is never
 		   written.  This happened a lot during testing! This code should be
 		   cleaned up someday. */
-		if (rx_frame.next != next_frame
-			&& rx_frame.next != next_frame + 1
-			&& rx_frame.next != next_frame - num_rx_pages
-			&& rx_frame.next != next_frame + 1 - num_rx_pages) {
+		if (rx_frame.next != next_frame &&
+		    rx_frame.next != next_frame + 1 &&
+		    rx_frame.next != next_frame - num_rx_pages &&
+		    rx_frame.next != next_frame + 1 - num_rx_pages) {
 			ei_local->current_page = rxing_page;
 			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
 			dev->stats.rx_errors++;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index f2a197fd47a5..336e7c7a9275 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -231,8 +231,8 @@ static void temac_set_multicast_list(struct net_device *ndev)
 	int i;
 
 	mutex_lock(&lp->indirect_mutex);
-	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)
-			|| ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
+	    ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
 		/*
 		 *	We must make the kernel realise we had to move
 		 *	into promisc mode or we start all out war on
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1bc654a73c47..eae4ad749e9d 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -207,20 +207,12 @@ static __net_init int loopback_net_init(struct net *net)
 out_free_netdev:
 	free_netdev(dev);
 out:
-	if (net == &init_net)
+	if (net_eq(net, &init_net))
 		panic("loopback: Failed to register netdevice: %d\n", err);
 	return err;
 }
 
-static __net_exit void loopback_net_exit(struct net *net)
-{
-	struct net_device *dev = net->loopback_dev;
-
-	unregister_netdev(dev);
-}
-
 /* Registered in net/core/dev.c */
 struct pernet_operations __net_initdata loopback_net_ops = {
        .init = loopback_net_init,
-       .exit = loopback_net_exit,
 };
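The loopback hunk keeps the panic-on-failure path for the initial namespace but spells the comparison with net_eq(), and drops the explicit per-namespace .exit hook, leaving loopback teardown to the generic per-namespace device cleanup. net_eq() behaves roughly like this sketch (hypothetical name; the real helper sits in <net/net_namespace.h>):

#include <net/net_namespace.h>

/* sketch: with namespaces compiled out the test folds to constant true,
 * letting the compiler discard the non-init_net branch entirely */
static inline int net_eq_sketch(const struct net *net1, const struct net *net2)
{
#ifdef CONFIG_NET_NS
	return net1 == net2;
#else
	return 1;
#endif
}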
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index cc3ed9cf28be..e20fefc73c8b 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -845,7 +845,7 @@ static int i596_open(struct net_device *dev)
 {
 	int i;
 
-	i = request_irq(dev->irq, &i596_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
 	if (i) {
 		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
 		return i;
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 149e0ed4a055..23b633e2ac42 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -222,8 +222,8 @@ struct net_device * __init mac89x0_probe(int unit)
 		int card_present;
 
 		local_irq_save(flags);
-		card_present = hwreg_present((void*) ioaddr+4)
-		  && hwreg_present((void*) ioaddr + DATA_PORT);
+		card_present = (hwreg_present((void*) ioaddr+4) &&
+				hwreg_present((void*) ioaddr + DATA_PORT));
 		local_irq_restore(flags);
 
 		if (!card_present)
@@ -337,7 +337,7 @@ net_open(struct net_device *dev)
 	writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);
 
 	/* Grab the interrupt */
-	if (request_irq(dev->irq, &net_interrupt, 0, "cs89x0", dev))
+	if (request_irq(dev->irq, net_interrupt, 0, "cs89x0", dev))
 		return -EAGAIN;
 
 	/* Set up the IRQ - Apparently magic */
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 7d7577b598ea..d9fbad386389 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -897,8 +897,8 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
 	    if (next >= N_RX_RING)
 		next = 0;
 	    np = mp->rx_cmds + next;
-	    if (next != mp->rx_fill
-		&& (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
+	    if (next != mp->rx_fill &&
+		(ld_le16(&np->xfer_status) & ACTIVE) != 0) {
 		printk(KERN_DEBUG "mace: lost a status word\n");
 		++mace_lost_status;
 	    } else
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index b3d7d8d77f46..875d361fb79d 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -140,7 +140,7 @@ static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
 
 static int macsonic_open(struct net_device* dev)
 {
-	if (request_irq(dev->irq, &sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) {
+	if (request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
@@ -149,7 +149,7 @@ static int macsonic_open(struct net_device* dev)
 	 * rupt as well, which must prevent re-entrance of the sonic handler.
 	 */
 	if (dev->irq == IRQ_AUTO_3)
-		if (request_irq(IRQ_NUBUS_9, &macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) {
+		if (request_irq(IRQ_NUBUS_9, macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) {
 			printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9);
 			free_irq(dev->irq, dev);
 			return -EAGAIN;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2490aa39804c..21a9c9ab4b34 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -29,6 +29,7 @@
 #include <linux/if_link.h>
 #include <linux/if_macvlan.h>
 #include <net/rtnetlink.h>
+#include <net/xfrm.h>
 
 #define MACVLAN_HASH_SIZE	(1 << BITS_PER_BYTE)
 
@@ -38,12 +39,28 @@ struct macvlan_port {
 	struct list_head	vlans;
 };
 
+/**
+ *	struct macvlan_rx_stats - MACVLAN percpu rx stats
+ *	@rx_packets: number of received packets
+ *	@rx_bytes: number of received bytes
+ *	@multicast: number of received multicast packets
+ *	@rx_errors: number of errors
+ */
+struct macvlan_rx_stats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long multicast;
+	unsigned long rx_errors;
+};
+
 struct macvlan_dev {
 	struct net_device	*dev;
 	struct list_head	list;
 	struct hlist_node	hlist;
 	struct macvlan_port	*port;
 	struct net_device	*lowerdev;
+	struct macvlan_rx_stats *rx_stats;
+	enum macvlan_mode	mode;
 };
 
 
@@ -101,41 +118,67 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
 	return 0;
 }
 
+static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
+				    unsigned int len, bool success,
+				    bool multicast)
+{
+	struct macvlan_rx_stats *rx_stats;
+
+	rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
+	if (likely(success)) {
+		rx_stats->rx_packets++;
+		rx_stats->rx_bytes += len;
+		if (multicast)
+			rx_stats->multicast++;
+	} else {
+		rx_stats->rx_errors++;
+	}
+}
+
+static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
+				 const struct ethhdr *eth, bool local)
+{
+	if (!skb)
+		return NET_RX_DROP;
+
+	if (local)
+		return dev_forward_skb(dev, skb);
+
+	skb->dev = dev;
+	if (!compare_ether_addr_64bits(eth->h_dest,
+				       dev->broadcast))
+		skb->pkt_type = PACKET_BROADCAST;
+	else
+		skb->pkt_type = PACKET_MULTICAST;
+
+	return netif_rx(skb);
+}
+
 static void macvlan_broadcast(struct sk_buff *skb,
-			      const struct macvlan_port *port)
+			      const struct macvlan_port *port,
+			      struct net_device *src,
+			      enum macvlan_mode mode)
 {
 	const struct ethhdr *eth = eth_hdr(skb);
 	const struct macvlan_dev *vlan;
 	struct hlist_node *n;
-	struct net_device *dev;
 	struct sk_buff *nskb;
 	unsigned int i;
+	int err;
 
 	if (skb->protocol == htons(ETH_P_PAUSE))
 		return;
 
 	for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
 		hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
-			dev = vlan->dev;
-
-			nskb = skb_clone(skb, GFP_ATOMIC);
-			if (nskb == NULL) {
-				dev->stats.rx_errors++;
-				dev->stats.rx_dropped++;
+			if (vlan->dev == src || !(vlan->mode & mode))
 				continue;
-			}
-
-			dev->stats.rx_bytes += skb->len + ETH_HLEN;
-			dev->stats.rx_packets++;
-			dev->stats.multicast++;
-
-			nskb->dev = dev;
-			if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
-				nskb->pkt_type = PACKET_BROADCAST;
-			else
-				nskb->pkt_type = PACKET_MULTICAST;
 
-			netif_rx(nskb);
+			nskb = skb_clone(skb, GFP_ATOMIC);
+			err = macvlan_broadcast_one(nskb, vlan->dev, eth,
+					 mode == MACVLAN_MODE_BRIDGE);
+			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
+					 err == NET_RX_SUCCESS, 1);
 		}
 	}
 }
@@ -146,14 +189,34 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
 	const struct ethhdr *eth = eth_hdr(skb);
 	const struct macvlan_port *port;
 	const struct macvlan_dev *vlan;
+	const struct macvlan_dev *src;
 	struct net_device *dev;
+	unsigned int len;
 
 	port = rcu_dereference(skb->dev->macvlan_port);
 	if (port == NULL)
 		return skb;
 
 	if (is_multicast_ether_addr(eth->h_dest)) {
-		macvlan_broadcast(skb, port);
+		src = macvlan_hash_lookup(port, eth->h_source);
+		if (!src)
+			/* frame comes from an external address */
+			macvlan_broadcast(skb, port, NULL,
+					  MACVLAN_MODE_PRIVATE |
+					  MACVLAN_MODE_VEPA    |
+					  MACVLAN_MODE_BRIDGE);
+		else if (src->mode == MACVLAN_MODE_VEPA)
+			/* flood to everyone except source */
+			macvlan_broadcast(skb, port, src->dev,
+					  MACVLAN_MODE_VEPA |
+					  MACVLAN_MODE_BRIDGE);
+		else if (src->mode == MACVLAN_MODE_BRIDGE)
+			/*
+			 * flood only to VEPA ports, bridge ports
+			 * already saw the frame on the way out.
+			 */
+			macvlan_broadcast(skb, port, src->dev,
+					  MACVLAN_MODE_VEPA);
 		return skb;
 	}
 
@@ -166,16 +229,11 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
 		kfree_skb(skb);
 		return NULL;
 	}
-
+	len = skb->len + ETH_HLEN;
 	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL) {
-		dev->stats.rx_errors++;
-		dev->stats.rx_dropped++;
+	macvlan_count_rx(vlan, len, skb != NULL, 0);
+	if (!skb)
 		return NULL;
-	}
-
-	dev->stats.rx_bytes += skb->len + ETH_HLEN;
-	dev->stats.rx_packets++;
 
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
@@ -184,25 +242,53 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
 	return NULL;
 }
 
+static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	const struct macvlan_dev *vlan = netdev_priv(dev);
+	const struct macvlan_port *port = vlan->port;
+	const struct macvlan_dev *dest;
+
+	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
+		const struct ethhdr *eth = (void *)skb->data;
+
+		/* send to other bridge ports directly */
+		if (is_multicast_ether_addr(eth->h_dest)) {
+			macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
+			goto xmit_world;
+		}
+
+		dest = macvlan_hash_lookup(port, eth->h_dest);
+		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
+			unsigned int length = skb->len + ETH_HLEN;
+			int ret = dev_forward_skb(dest->dev, skb);
+			macvlan_count_rx(dest, length,
+					 ret == NET_RX_SUCCESS, 0);
+
+			return NET_XMIT_SUCCESS;
+		}
+	}
+
+xmit_world:
+	skb->dev = vlan->lowerdev;
+	return dev_queue_xmit(skb);
+}
+
 static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 				      struct net_device *dev)
 {
 	int i = skb_get_queue_mapping(skb);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-	const struct macvlan_dev *vlan = netdev_priv(dev);
 	unsigned int len = skb->len;
 	int ret;
 
-	skb->dev = vlan->lowerdev;
-	ret = dev_queue_xmit(skb);
-
+	ret = macvlan_queue_xmit(skb, dev);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		txq->tx_packets++;
 		txq->tx_bytes += len;
 	} else
 		txq->tx_dropped++;
 
-	return NETDEV_TX_OK;
+	return ret;
 }
 
 static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -366,9 +452,47 @@ static int macvlan_init(struct net_device *dev)
 
 	macvlan_set_lockdep_class(dev);
 
+	vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
+	if (!vlan->rx_stats)
+		return -ENOMEM;
+
 	return 0;
 }
 
+static void macvlan_uninit(struct net_device *dev)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+
+	free_percpu(vlan->rx_stats);
+}
+
+static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct macvlan_dev *vlan = netdev_priv(dev);
+
+	dev_txq_stats_fold(dev, stats);
+
+	if (vlan->rx_stats) {
+		struct macvlan_rx_stats *p, rx = {0};
+		int i;
+
+		for_each_possible_cpu(i) {
+			p = per_cpu_ptr(vlan->rx_stats, i);
+			rx.rx_packets += p->rx_packets;
+			rx.rx_bytes   += p->rx_bytes;
+			rx.rx_errors  += p->rx_errors;
+			rx.multicast  += p->multicast;
+		}
+		stats->rx_packets = rx.rx_packets;
+		stats->rx_bytes   = rx.rx_bytes;
+		stats->rx_errors  = rx.rx_errors;
+		stats->rx_dropped = rx.rx_errors;
+		stats->multicast  = rx.multicast;
+	}
+	return stats;
+}
+
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *drvinfo)
 {
@@ -405,6 +529,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
 
 static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_init		= macvlan_init,
+	.ndo_uninit		= macvlan_uninit,
 	.ndo_open		= macvlan_open,
 	.ndo_stop		= macvlan_stop,
 	.ndo_start_xmit		= macvlan_start_xmit,
@@ -412,6 +537,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_change_rx_flags	= macvlan_change_rx_flags,
 	.ndo_set_mac_address	= macvlan_set_mac_address,
 	.ndo_set_multicast_list	= macvlan_set_multicast_list,
+	.ndo_get_stats		= macvlan_dev_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
@@ -456,25 +582,6 @@ static void macvlan_port_destroy(struct net_device *dev)
 	kfree(port);
 }
 
-static void macvlan_transfer_operstate(struct net_device *dev)
-{
-	struct macvlan_dev *vlan = netdev_priv(dev);
-	const struct net_device *lowerdev = vlan->lowerdev;
-
-	if (lowerdev->operstate == IF_OPER_DORMANT)
-		netif_dormant_on(dev);
-	else
-		netif_dormant_off(dev);
-
-	if (netif_carrier_ok(lowerdev)) {
-		if (!netif_carrier_ok(dev))
-			netif_carrier_on(dev);
-	} else {
-		if (netif_carrier_ok(dev))
-			netif_carrier_off(dev);
-	}
-}
-
 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
 {
 	if (tb[IFLA_ADDRESS]) {
@@ -483,6 +590,17 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
 			return -EADDRNOTAVAIL;
 	}
+
+	if (data && data[IFLA_MACVLAN_MODE]) {
+		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
+		case MACVLAN_MODE_PRIVATE:
+		case MACVLAN_MODE_VEPA:
+		case MACVLAN_MODE_BRIDGE:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
 	return 0;
 }
 
@@ -505,7 +623,7 @@ static int macvlan_get_tx_queues(struct net *net,
 	return 0;
 }
 
-static int macvlan_newlink(struct net_device *dev,
+static int macvlan_newlink(struct net *src_net, struct net_device *dev,
 			   struct nlattr *tb[], struct nlattr *data[])
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
@@ -516,7 +634,7 @@ static int macvlan_newlink(struct net_device *dev,
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
 
-	lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
+	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
 	if (lowerdev == NULL)
 		return -ENODEV;
 
@@ -547,27 +665,61 @@ static int macvlan_newlink(struct net_device *dev,
 	vlan->dev      = dev;
 	vlan->port     = port;
 
+	vlan->mode     = MACVLAN_MODE_VEPA;
+	if (data && data[IFLA_MACVLAN_MODE])
+		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+
 	err = register_netdevice(dev);
 	if (err < 0)
 		return err;
 
 	list_add_tail(&vlan->list, &port->vlans);
-	macvlan_transfer_operstate(dev);
+	netif_stacked_transfer_operstate(lowerdev, dev);
 	return 0;
 }
 
-static void macvlan_dellink(struct net_device *dev)
+static void macvlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvlan_port *port = vlan->port;
 
 	list_del(&vlan->list);
-	unregister_netdevice(dev);
+	unregister_netdevice_queue(dev, head);
 
 	if (list_empty(&port->vlans))
 		macvlan_port_destroy(port->dev);
 }
 
+static int macvlan_changelink(struct net_device *dev,
+		struct nlattr *tb[], struct nlattr *data[])
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	if (data && data[IFLA_MACVLAN_MODE])
+		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+	return 0;
+}
+
+static size_t macvlan_get_size(const struct net_device *dev)
+{
+	return nla_total_size(4);
+}
+
+static int macvlan_fill_info(struct sk_buff *skb,
+				const struct net_device *dev)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+
+	NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
+	[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
+};
+
 static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
 	.kind		= "macvlan",
 	.priv_size	= sizeof(struct macvlan_dev),
@@ -576,6 +728,11 @@ static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
 	.validate	= macvlan_validate,
 	.newlink	= macvlan_newlink,
 	.dellink	= macvlan_dellink,
+	.maxtype	= IFLA_MACVLAN_MAX,
+	.policy		= macvlan_policy,
+	.changelink	= macvlan_changelink,
+	.get_size	= macvlan_get_size,
+	.fill_info	= macvlan_fill_info,
 };
 
 static int macvlan_device_event(struct notifier_block *unused,
@@ -592,7 +749,8 @@ static int macvlan_device_event(struct notifier_block *unused,
 	switch (event) {
 	case NETDEV_CHANGE:
 		list_for_each_entry(vlan, &port->vlans, list)
-			macvlan_transfer_operstate(vlan->dev);
+			netif_stacked_transfer_operstate(vlan->lowerdev,
+							 vlan->dev);
 		break;
 	case NETDEV_FEAT_CHANGE:
 		list_for_each_entry(vlan, &port->vlans, list) {
@@ -603,7 +761,7 @@ static int macvlan_device_event(struct notifier_block *unused,
 		break;
 	case NETDEV_UNREGISTER:
 		list_for_each_entry_safe(vlan, next, &port->vlans, list)
-			macvlan_dellink(vlan->dev);
+			macvlan_dellink(vlan->dev, NULL);
 		break;
 	}
 	return NOTIFY_DONE;
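The macvlan rework above replaces the shared dev->stats RX counters with per-CPU counters (allocated in .ndo_init, folded in .ndo_get_stats, freed in .ndo_uninit) and makes delivery depend on the new private/VEPA/bridge port modes. The counter pattern in isolation looks roughly like this sketch (hypothetical names; the update side assumes it runs with preemption disabled, e.g. in softirq RX context, as macvlan_count_rx() does):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct demo_rx_stats {
	unsigned long rx_packets;
	unsigned long rx_bytes;
};

static struct demo_rx_stats *demo_stats;	/* one instance per CPU */

static int demo_stats_init(void)
{
	demo_stats = alloc_percpu(struct demo_rx_stats);
	return demo_stats ? 0 : -ENOMEM;
}

static void demo_stats_rx(unsigned int len)
{
	struct demo_rx_stats *s = per_cpu_ptr(demo_stats, smp_processor_id());

	s->rx_packets++;		/* no atomics, no shared cache line */
	s->rx_bytes += len;
}

static unsigned long demo_stats_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)	/* fold every CPU's copy */
		sum += per_cpu_ptr(demo_stats, cpu)->rx_packets;
	return sum;
}

static void demo_stats_free(void)
{
	free_percpu(demo_stats);
}

The new port mode is configured over rtnetlink via the IFLA_MACVLAN_MODE attribute added above; with a new enough iproute2 this surfaces as a mode argument to "ip link add ... type macvlan", though the exact option spelling depends on the iproute2 version.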
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 21f8754fcf4c..e85bf04cf813 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -162,6 +162,10 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
 		result |= ADVERTISED_100baseT_Half;
 	if (reg & ADVERTISE_100FULL)
 		result |= ADVERTISED_100baseT_Full;
+	if (reg & ADVERTISE_PAUSE_CAP)
+		result |= ADVERTISED_Pause;
+	if (reg & ADVERTISE_PAUSE_ASYM)
+		result |= ADVERTISED_Asym_Pause;
 	return result;
 }
 
@@ -344,11 +348,9 @@ void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
 
 	old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
 				  MDIO_AN_ADVERTISE);
-	adv = old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-	if (ecmd->autoneg)
-		adv |= mii_advertise_flowctrl(
-			(ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
-			(ecmd->tx_pause ? FLOW_CTRL_TX : 0));
+	adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
+	       mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
+				      (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
 	if (adv != old_adv) {
 		mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
 				 MDIO_AN_ADVERTISE, adv);
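The mdio45_ethtool_spauseparam_an() hunk now folds the pause bits in unconditionally instead of only when ecmd->autoneg is set, and the read side reports them via ADVERTISED_Pause/ADVERTISED_Asym_Pause. The FLOW_CTRL_* to ADVERTISE_PAUSE_* mapping performed by mii_advertise_flowctrl() follows 802.3 clause 28B and is roughly the following sketch (hypothetical name):

#include <linux/mii.h>

/* sketch of the mapping:
 *   RX only    -> PAUSE_CAP | PAUSE_ASYM  (we honour pause, don't send it)
 *   TX only    -> PAUSE_ASYM              (we send pause, don't honour it)
 *   RX and TX  -> PAUSE_CAP               (symmetric pause)
 */
static u16 flowctrl_to_adv(int cap)
{
	u16 adv = 0;

	if (cap & FLOW_CTRL_RX)
		adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	if (cap & FLOW_CTRL_TX)
		adv ^= ADVERTISE_PAUSE_ASYM;
	return adv;
}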
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 8ea98bd89ff1..8e9704f5c122 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -211,7 +211,7 @@ static int mipsnet_open(struct net_device *dev)
 {
 	int err;
 
-	err = request_irq(dev->irq, &mipsnet_interrupt,
+	err = request_irq(dev->irq, mipsnet_interrupt,
 			  IRQF_SHARED, dev->name, (void *) dev);
 	if (err) {
 		release_region(dev->base_addr, sizeof(struct mipsnet_regs));
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b62e61d4ca3e..796a493f95ab 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -849,7 +849,7 @@ no_csum:
 	return 0;
 }
 
-static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int queue;
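mv643xx_eth_xmit() is switched from a bare int to netdev_tx_t, the dedicated return type for .ndo_start_xmit handlers. A minimal sketch of the contract, with a hypothetical private structure standing in for the real TX ring bookkeeping:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_priv {
	unsigned int tx_free;	/* free TX descriptors, hypothetical */
};

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (priv->tx_free == 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb not consumed, core retries */
	}

	priv->tx_free--;
	/* ... hand skb to the hardware here ... */
	dev_kfree_skb(skb);		/* stand-in for real TX completion */
	return NETDEV_TX_OK;		/* skb consumed */
}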
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f3624517cb0e..d38921906bb7 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -207,7 +207,6 @@ struct myri10ge_priv {
 	int big_bytes;
 	int max_intr_slots;
 	struct net_device *dev;
-	struct net_device_stats stats;
 	spinlock_t stats_lock;
 	u8 __iomem *sram;
 	int sram_size;
@@ -264,6 +263,10 @@ static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
 static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
 static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
 static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
+MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
+MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
+MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
+MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
 
 static char *myri10ge_fw_name = NULL;
 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
@@ -407,8 +410,8 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
 		 * and try to get the completion quickly
 		 * (1ms will be enough for those commands) */
 		for (sleep_total = 0;
-		     sleep_total < 1000
-		     && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
+		     sleep_total < 1000 &&
+		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
 		     sleep_total += 10) {
 			udelay(10);
 			mb();
@@ -416,8 +419,8 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
 	} else {
 		/* use msleep for most command */
 		for (sleep_total = 0;
-		     sleep_total < 15
-		     && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
+		     sleep_total < 15 &&
+		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
 		     sleep_total++)
 			msleep(1);
 	}
@@ -554,8 +557,8 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp,
 	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
 	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
 
-	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR
-	      && mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
+	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
+	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
 		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
 		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
 			MXGEFW_VERSION_MINOR);
@@ -1409,8 +1412,8 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 	}
 
 	/* start the queue if we've stopped it */
-	if (netif_tx_queue_stopped(dev_queue)
-	    && tx->req - tx->done < (tx->mask >> 1)) {
+	if (netif_tx_queue_stopped(dev_queue) &&
+	    tx->req - tx->done < (tx->mask >> 1)) {
 		tx->wake_queue++;
 		netif_tx_wake_queue(dev_queue);
 	}
@@ -1832,7 +1835,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
 	/* force stats update */
 	(void)myri10ge_get_stats(netdev);
 	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
-		data[i] = ((unsigned long *)&mgp->stats)[i];
+		data[i] = ((unsigned long *)&netdev->stats)[i];
 
 	data[i++] = (unsigned int)mgp->tx_boundary;
 	data[i++] = (unsigned int)mgp->wc_enabled;
@@ -3002,7 +3005,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
 {
 	struct myri10ge_priv *mgp = netdev_priv(dev);
 	struct myri10ge_slice_netstats *slice_stats;
-	struct net_device_stats *stats = &mgp->stats;
+	struct net_device_stats *stats = &dev->stats;
 	int i;
 
 	spin_lock(&mgp->stats_lock);
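The MODULE_FIRMWARE() lines added to myri10ge only embed the firmware file names in the module's modinfo so packaging tools can ship them; loading still goes through request_firmware() at runtime, roughly as in this sketch (hypothetical helper, error handling trimmed). The same file also drops the private mgp->stats copy in favour of the standard netdev->stats block.

#include <linux/firmware.h>
#include <linux/pci.h>

static int demo_load_firmware(struct pci_dev *pdev, const char *name)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, &pdev->dev);
	if (err)
		return err;		/* file missing, udev timeout, etc. */

	/* ... copy fw->data (fw->size bytes) to the adapter here ... */

	release_firmware(fw);
	return 0;
}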
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 29ebebc6a95b..b3513ad3b703 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -1084,7 +1084,7 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
 
 	/* Register interrupt handler now. */
 	DET(("Requesting MYRIcom IRQ line.\n"));
-	if (request_irq(dev->irq, &myri_interrupt,
+	if (request_irq(dev->irq, myri_interrupt,
 			IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
 		printk("MyriCOM: Cannot register interrupt handler.\n");
 		goto err;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index b2722c44337e..797fe164ce27 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -683,8 +683,8 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
         /* Find out the new setting */
         if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
                 new_setting = 1;
-        else if (!strncmp("off", buf, count - 1)
-                 || !strncmp("0", buf, count - 1))
+        else if (!strncmp("off", buf, count - 1) ||
+                 !strncmp("0", buf, count - 1))
 		new_setting = 0;
 	else
                  return count;
@@ -757,8 +757,8 @@ static void __devinit natsemi_init_media (struct net_device *dev)
 	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
 	np->advertising= mdio_read(dev, MII_ADVERTISE);
 
-	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
-	 && netif_msg_probe(np)) {
+	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
+	    netif_msg_probe(np)) {
 		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
 			"10%s %s duplex.\n",
 			pci_name(np->pci_dev),
@@ -1153,8 +1153,8 @@ static void init_phy_fixup(struct net_device *dev)
 	tmp = mdio_read(dev, MII_BMCR);
 	if (np->autoneg == AUTONEG_ENABLE) {
 		/* renegotiate if something changed */
-		if ((tmp & BMCR_ANENABLE) == 0
-		 || np->advertising != mdio_read(dev, MII_ADVERTISE))
+		if ((tmp & BMCR_ANENABLE) == 0 ||
+		    np->advertising != mdio_read(dev, MII_ADVERTISE))
 		{
 			/* turn on autonegotiation and force negotiation */
 			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
@@ -1535,7 +1535,7 @@ static int netdev_open(struct net_device *dev)
 	/* Reset the chip, just in case. */
 	natsemi_reset(dev);
 
-	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i) return i;
 
 	if (netif_msg_ifup(np))
@@ -2164,8 +2164,8 @@ static void netdev_tx_done(struct net_device *dev)
 		dev_kfree_skb_irq(np->tx_skbuff[entry]);
 		np->tx_skbuff[entry] = NULL;
 	}
-	if (netif_queue_stopped(dev)
-		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+	if (netif_queue_stopped(dev) &&
+	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
 		/* The ring is no longer full, wake queue. */
 		netif_wake_queue(dev);
 	}
@@ -2343,8 +2343,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 			/* Omit CRC size. */
 			/* Check if the packet is long enough to accept
 			 * without copying to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak
-			    && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
+			if (pkt_len < rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
 				/* 16 byte align the IP header */
 				skb_reserve(skb, RX_OFFSET);
 				pci_dma_sync_single_for_cpu(np->pci_dev,
@@ -2390,8 +2390,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
 	spin_lock(&np->lock);
 	if (intr_status & LinkChange) {
 		u16 lpa = mdio_read(dev, MII_LPA);
-		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE
-		 && netif_msg_link(np)) {
+		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
+		    netif_msg_link(np)) {
 			printk(KERN_INFO
 				"%s: Autonegotiation advertising"
 				" %#04x  partner %#04x.\n", dev->name,
@@ -2488,8 +2488,8 @@ static void __set_rx_mode(struct net_device *dev)
 	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
 		rx_mode = RxFilterEnable | AcceptBroadcast
 			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
-	} else if ((dev->mc_count > multicast_filter_limit)
-	  || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		rx_mode = RxFilterEnable | AcceptBroadcast
 			| AcceptAllMulticast | AcceptMyPhys;
 	} else {
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 9f4235466d59..64770298c4f7 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -212,7 +212,7 @@ static int netx_eth_open(struct net_device *ndev)
 	struct netx_eth_priv *priv = netdev_priv(ndev);
 
 	if (request_irq
-	    (ndev->irq, &netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev))
+	    (ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev))
 		return -EAGAIN;
 
 	writel(ndev->dev_addr[0] |
@@ -510,3 +510,6 @@ module_exit(netx_eth_cleanup);
 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" CARDNAME);
+MODULE_FIRMWARE("xc0.bin");
+MODULE_FIRMWARE("xc1.bin");
+MODULE_FIRMWARE("xc2.bin");
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e1237b802872..76cd1f3e9fc8 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 50
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.50"
+#define _NETXEN_NIC_LINUX_SUBVERSION 65
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.65"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
@@ -74,8 +74,6 @@
 #define NETXEN_FLASH_TOTAL_SIZE  (NETXEN_NUM_FLASH_SECTORS \
 					* NETXEN_FLASH_SECTOR_SIZE)
 
-#define PHAN_VENDOR_ID 0x4040
-
 #define RCV_DESC_RINGSIZE(rds_ring)	\
 	(sizeof(struct rcv_desc) * (rds_ring)->num_desc)
 #define RCV_BUFF_RINGSIZE(rds_ring)	\
@@ -117,9 +115,11 @@
 #define NX_P3_B0		0x40
 #define NX_P3_B1		0x41
 #define NX_P3_B2		0x42
+#define NX_P3P_A0		0x50
 
 #define NX_IS_REVISION_P2(REVISION)     (REVISION <= NX_P2_C1)
 #define NX_IS_REVISION_P3(REVISION)     (REVISION >= NX_P3_A0)
+#define NX_IS_REVISION_P3P(REVISION)     (REVISION >= NX_P3P_A0)
 
 #define FIRST_PAGE_GROUP_START	0
 #define FIRST_PAGE_GROUP_END	0x100000
@@ -419,6 +419,34 @@ struct status_desc {
 	__le64 status_desc_data[2];
 } __attribute__ ((aligned(16)));
 
+/* UNIFIED ROMIMAGE *************************/
+#define NX_UNI_FW_MIN_SIZE		0x3eb000
+#define NX_UNI_DIR_SECT_PRODUCT_TBL	0x0
+#define NX_UNI_DIR_SECT_BOOTLD		0x6
+#define NX_UNI_DIR_SECT_FW		0x7
+
+/*Offsets */
+#define NX_UNI_CHIP_REV_OFF		10
+#define NX_UNI_FLAGS_OFF		11
+#define NX_UNI_BIOS_VERSION_OFF 	12
+#define NX_UNI_BOOTLD_IDX_OFF		27
+#define NX_UNI_FIRMWARE_IDX_OFF 	29
+
+struct uni_table_desc{
+	uint32_t	findex;
+	uint32_t	num_entries;
+	uint32_t	entry_size;
+	uint32_t	reserved[5];
+};
+
+struct uni_data_desc{
+	uint32_t	findex;
+	uint32_t	size;
+	uint32_t	reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
 /* The version of the main data structure */
 #define	NETXEN_BDINFO_VERSION 1
 
@@ -485,7 +513,15 @@ struct status_desc {
 #define NX_P2_MN_ROMIMAGE	0
 #define NX_P3_CT_ROMIMAGE	1
 #define NX_P3_MN_ROMIMAGE	2
-#define NX_FLASH_ROMIMAGE	3
+#define NX_UNIFIED_ROMIMAGE	3
+#define NX_FLASH_ROMIMAGE	4
+#define NX_UNKNOWN_ROMIMAGE	0xff
+
+#define NX_P2_MN_ROMIMAGE_NAME		"nxromimg.bin"
+#define NX_P3_CT_ROMIMAGE_NAME		"nx3fwct.bin"
+#define NX_P3_MN_ROMIMAGE_NAME		"nx3fwmn.bin"
+#define NX_UNIFIED_ROMIMAGE_NAME	"phanfw.bin"
+#define NX_FLASH_ROMIMAGE_NAME		"flash"
 
 extern char netxen_nic_driver_name[];
 
@@ -543,13 +579,16 @@ struct netxen_hardware_context {
 	void __iomem *pci_base1;
 	void __iomem *pci_base2;
 	void __iomem *db_base;
+	void __iomem *ocm_win_crb;
+
 	unsigned long db_len;
 	unsigned long pci_len0;
 
-	int qdr_sn_window;
-	int ddr_mn_window;
-	u32 mn_win_crb;
-	u32 ms_win_crb;
+	u32 ocm_win;
+	u32 crb_win;
+
+	rwlock_t crb_lock;
+	spinlock_t mem_lock;
 
 	u8 cut_through;
 	u8 revision_id;
@@ -1039,6 +1078,9 @@ typedef struct {
 #define LINKEVENT_LINKSPEED_MBPS	0
 #define LINKEVENT_LINKSPEED_ENCODED	1
 
+#define AUTO_FW_RESET_ENABLED	0xEF10AF12
+#define AUTO_FW_RESET_DISABLED	0xDCBAAF12
+
 /* firmware response header:
  *	63:58 - message type
  *	57:56 - owner
@@ -1086,6 +1128,7 @@ typedef struct {
 #define NETXEN_NIC_MSIX_ENABLED		0x04
 #define NETXEN_NIC_LRO_ENABLED		0x08
 #define NETXEN_NIC_BRIDGE_ENABLED       0X10
+#define NETXEN_NIC_DIAG_ENABLED		0x20
 #define NETXEN_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
 
@@ -1115,10 +1158,6 @@ struct netxen_adapter {
 	struct pci_dev *pdev;
 	struct list_head mac_list;
 
-	u32 curr_window;
-	u32 crb_win;
-	rwlock_t adapter_lock;
-
 	spinlock_t tx_clean_lock;
 
 	u16 num_txd;
@@ -1182,11 +1221,10 @@ struct netxen_adapter {
 	u32 (*crb_read)(struct netxen_adapter *, ulong);
 	int (*crb_write)(struct netxen_adapter *, ulong, u32);
 
-	int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int);
-	int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int);
+	int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
+	int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
 
-	unsigned long (*pci_set_window)(struct netxen_adapter *,
-			unsigned long long);
+	int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
 
 	u32 (*io_read)(struct netxen_adapter *, void __iomem *);
 	void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
@@ -1205,12 +1243,10 @@ struct netxen_adapter {
 
 	struct work_struct  tx_timeout_task;
 
-	struct net_device_stats net_stats;
-
 	nx_nic_intr_coalesce_t coal;
 
 	unsigned long state;
-	u32 resv5;
+	__le32 file_prd_off;	/*File fw product offset*/
 	u32 fw_version;
 	const struct firmware *fw;
 };
@@ -1273,7 +1309,7 @@ int netxen_load_firmware(struct netxen_adapter *adapter);
 int netxen_need_fw_reset(struct netxen_adapter *adapter);
 void netxen_request_firmware(struct netxen_adapter *adapter);
 void netxen_release_firmware(struct netxen_adapter *adapter);
-int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter);
 
 int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
 int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 714f38791a9a..ddd704ae0188 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -85,11 +85,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 
 	strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
 	strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
-	read_lock(&adapter->adapter_lock);
 	fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
 	fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
 	fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-	read_unlock(&adapter->adapter_lock);
 	sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
 
 	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
@@ -259,18 +257,18 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	/* read which mode */
 	if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
 		/* autonegotiation */
-		if (adapter->phy_write
-		    && adapter->phy_write(adapter,
-					  NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-					  ecmd->autoneg) != 0)
+		if (adapter->phy_write &&
+		    adapter->phy_write(adapter,
+				       NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+				       ecmd->autoneg) != 0)
 			return -EIO;
 		else
 			adapter->link_autoneg = ecmd->autoneg;
 
-		if (adapter->phy_read
-		    && adapter->phy_read(adapter,
-					 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-					 &status) != 0)
+		if (adapter->phy_read &&
+		    adapter->phy_read(adapter,
+				      NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+				      &status) != 0)
 			return -EIO;
 
 		/* speed */
@@ -290,10 +288,10 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 			netxen_clear_phy_duplex(status);
 		if (ecmd->duplex == DUPLEX_FULL)
 			netxen_set_phy_duplex(status);
-		if (adapter->phy_write
-		    && adapter->phy_write(adapter,
-					  NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-					  *((int *)&status)) != 0)
+		if (adapter->phy_write &&
+		    adapter->phy_write(adapter,
+				       NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+				       *((int *)&status)) != 0)
 			return -EIO;
 		else {
 			adapter->link_speed = ecmd->speed;
@@ -444,10 +442,10 @@ static u32 netxen_nic_test_link(struct net_device *dev)
 
 	/* read which mode */
 	if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
-		if (adapter->phy_read
-		    && adapter->phy_read(adapter,
-					 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-					 &status) != 0)
+		if (adapter->phy_read &&
+		    adapter->phy_read(adapter,
+				      NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+				      &status) != 0)
 			return -EIO;
 		else {
 			val = netxen_get_phy_link(status);
@@ -690,8 +688,8 @@ static int netxen_nic_reg_test(struct net_device *dev)
 	u32 data_read, data_written;
 
 	data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
-	if ((data_read & 0xffff) != PHAN_VENDOR_ID)
-	return 1;
+	if ((data_read & 0xffff) != adapter->pdev->vendor)
+		return 1;
 
 	data_written = (u32)0xa5a5a5a5;
 
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 17bb3818d84e..d138fc22927a 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -664,40 +664,51 @@ enum {
 #define NETXEN_NIU_AP_STATION_ADDR_0(I)    (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
 #define NETXEN_NIU_AP_STATION_ADDR_1(I)    (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
 
+
+#define TEST_AGT_CTRL	(0x00)
+
+#define TA_CTL_START	1
+#define TA_CTL_ENABLE	2
+#define TA_CTL_WRITE	4
+#define TA_CTL_BUSY	8
+
 /*
  *   Register offsets for MN
  */
-#define	MIU_CONTROL	       (0x000)
-#define MIU_TEST_AGT_CTRL      (0x090)
-#define MIU_TEST_AGT_ADDR_LO   (0x094)
-#define MIU_TEST_AGT_ADDR_HI   (0x098)
-#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
-#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
-#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
-#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
-#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
-#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
-#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
-#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
-
-/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
-#define MIU_TA_CTL_START        1
-#define MIU_TA_CTL_ENABLE       2
-#define MIU_TA_CTL_WRITE        4
-#define MIU_TA_CTL_BUSY         8
-
-#define SIU_TEST_AGT_CTRL      (0x060)
-#define SIU_TEST_AGT_ADDR_LO   (0x064)
-#define SIU_TEST_AGT_ADDR_HI   (0x078)
-#define SIU_TEST_AGT_WRDATA_LO (0x068)
-#define SIU_TEST_AGT_WRDATA_HI (0x06c)
-#define SIU_TEST_AGT_WRDATA(i) (0x068+(4*(i)))
-#define SIU_TEST_AGT_RDDATA_LO (0x070)
-#define SIU_TEST_AGT_RDDATA_HI (0x074)
-#define SIU_TEST_AGT_RDDATA(i) (0x070+(4*(i)))
-
-#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
-#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+#define MIU_TEST_AGT_BASE		(0x90)
+
+#define MIU_TEST_AGT_ADDR_LO		(0x04)
+#define MIU_TEST_AGT_ADDR_HI		(0x08)
+#define MIU_TEST_AGT_WRDATA_LO		(0x10)
+#define MIU_TEST_AGT_WRDATA_HI		(0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO	(0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI	(0x24)
+#define MIU_TEST_AGT_WRDATA(i)		(0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO		(0x18)
+#define MIU_TEST_AGT_RDDATA_HI		(0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO	(0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI	(0x2c)
+#define MIU_TEST_AGT_RDDATA(i)		(0x18+(0x10*((i)>>1))+(4*((i)&1)))
+
+#define MIU_TEST_AGT_ADDR_MASK		0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off)	(0)
+
+/*
+ *   Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE		(0x60)
+
+#define SIU_TEST_AGT_ADDR_LO		(0x04)
+#define SIU_TEST_AGT_ADDR_HI		(0x18)
+#define SIU_TEST_AGT_WRDATA_LO		(0x08)
+#define SIU_TEST_AGT_WRDATA_HI		(0x0c)
+#define SIU_TEST_AGT_WRDATA(i)		(0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO		(0x10)
+#define SIU_TEST_AGT_RDDATA_HI		(0x14)
+#define SIU_TEST_AGT_RDDATA(i)		(0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK		0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off)	((off)>>22)
 
 /* XG Link status */
 #define XG_LINK_UP	0x10
@@ -859,6 +870,9 @@ enum {
 		(PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
 		(PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
 
+#define PCIX_OCM_WINDOW		(0x10800)
+#define PCIX_OCM_WINDOW_REG(func)	(PCIX_OCM_WINDOW + 0x20 * (func))
+
 #define PCIX_TARGET_STATUS	(0x10118)
 #define PCIX_TARGET_STATUS_F1	(0x10160)
 #define PCIX_TARGET_STATUS_F2	(0x10164)
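The rewritten MIU/SIU test-agent definitions split each block into a base plus per-register offsets and share the TA_CTL_* control bits. The handshake those bits imply is typically: program the address registers, kick ENABLE then START|ENABLE, poll until BUSY clears, then read the data registers. The sketch below illustrates only that pattern, not the driver's exact routine; 'wr'/'rd' are hypothetical CRB accessors and 'base' is MIU_TEST_AGT_BASE or SIU_TEST_AGT_BASE from the definitions above.

#include <linux/types.h>
#include <linux/errno.h>

static int test_agt_read64(void (*wr)(u32 off, u32 val),
			   u32 (*rd)(u32 off),
			   u32 base, u64 addr, u64 *data)
{
	int retries = 1000;
	u32 lo, hi;

	wr(base + MIU_TEST_AGT_ADDR_LO, (u32)addr);
	wr(base + MIU_TEST_AGT_ADDR_HI, (u32)(addr >> 32));
	wr(base + TEST_AGT_CTRL, TA_CTL_ENABLE);
	wr(base + TEST_AGT_CTRL, TA_CTL_START | TA_CTL_ENABLE);

	while (rd(base + TEST_AGT_CTRL) & TA_CTL_BUSY) {
		if (--retries == 0)
			return -EIO;	/* agent never went idle */
	}

	lo = rd(base + MIU_TEST_AGT_RDDATA_LO);
	hi = rd(base + MIU_TEST_AGT_RDDATA_HI);
	*data = ((u64)hi << 32) | lo;
	return 0;
}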
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 52a3798d8d94..2e364fee3cbb 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -31,6 +31,7 @@
 #define MASK(n) ((1ULL<<(n))-1)
 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
 #define MS_WIN(addr) (addr & 0x0ffc0000)
 
 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -41,6 +42,11 @@
 #define CRB_HI(off)	((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
 #define CRB_INDIRECT_2M	(0x1e0000UL)
 
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+		void __iomem *addr, u32 data);
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+		void __iomem *addr);
+
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
 {
@@ -326,7 +332,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
 		if (done == 1)
 			break;
 		if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
-			return -1;
+			return -EIO;
 		msleep(1);
 	}
 
@@ -1073,89 +1079,71 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
  * Changes the CRB window to the specified window.
  */
 static void
-netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, u32 wndw)
+netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
+		u32 window)
 {
 	void __iomem *offset;
-	u32 tmp;
-	int count = 0;
-	uint8_t func = adapter->ahw.pci_func;
+	int count = 10;
+	u8 func = adapter->ahw.pci_func;
 
-	if (adapter->curr_window == wndw)
+	if (adapter->ahw.crb_win == window)
 		return;
-	/*
-	 * Move the CRB window.
-	 * We need to write to the "direct access" region of PCI
-	 * to avoid a race condition where the window register has
-	 * not been successfully written across CRB before the target
-	 * register address is received by PCI. The direct region bypasses
-	 * the CRB bus.
-	 */
+
 	offset = PCI_OFFSET_SECOND_RANGE(adapter,
 			NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
 
-	if (wndw & 0x1)
-		wndw = NETXEN_WINDOW_ONE;
+	writel(window, offset);
+	do {
+		if (window == readl(offset))
+			break;
 
-	writel(wndw, offset);
+		if (printk_ratelimit())
+			dev_warn(&adapter->pdev->dev,
+					"failed to set CRB window to %d\n",
+					(window == NETXEN_WINDOW_ONE));
+		udelay(1);
 
-	/* MUST make sure window is set before we forge on... */
-	while ((tmp = readl(offset)) != wndw) {
-		printk(KERN_WARNING "%s: %s WARNING: CRB window value not "
-		       "registered properly: 0x%08x.\n",
-		       netxen_nic_driver_name, __func__, tmp);
-		mdelay(1);
-		if (count >= 10)
-			break;
-		count++;
-	}
+	} while (--count > 0);
 
-	if (wndw == NETXEN_WINDOW_ONE)
-		adapter->curr_window = 1;
-	else
-		adapter->curr_window = 0;
+	if (count > 0)
+		adapter->ahw.crb_win = window;
 }
 
 /*
- * Return -1 if off is not valid,
+ * Returns < 0 if off is not valid,
  *	 1 if window access is needed. 'off' is set to offset from
  *	   CRB space in 128M pci map
  *	 0 if no window access is needed. 'off' is set to 2M addr
  * In: 'off' is offset from base in 128M pci map
  */
 static int
-netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+		ulong off, void __iomem **addr)
 {
 	crb_128M_2M_sub_block_map_t *m;
 
 
-	if (*off >= NETXEN_CRB_MAX)
-		return -1;
-
-	if (*off >= NETXEN_PCI_CAMQM && (*off < NETXEN_PCI_CAMQM_2M_END)) {
-		*off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
-			(ulong)adapter->ahw.pci_base0;
-		return 0;
-	}
-
-	if (*off < NETXEN_PCI_CRBSPACE)
-		return -1;
+	if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
+		return -EINVAL;
 
-	*off -= NETXEN_PCI_CRBSPACE;
+	off -= NETXEN_PCI_CRBSPACE;
 
 	/*
 	 * Try direct map
 	 */
-	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
 
-	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
-		*off = *off + m->start_2M - m->start_128M +
-			(ulong)adapter->ahw.pci_base0;
+	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+		*addr = adapter->ahw.pci_base0 + m->start_2M +
+			(off - m->start_128M);
 		return 0;
 	}
 
 	/*
 	 * Not in direct map, use crb window
 	 */
+	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+		(off & MASK(16));
 	return 1;
 }
 
@@ -1165,52 +1153,78 @@ netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
  * side effect: lock crb window
  */
 static void
-netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off)
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
 {
-	u32 win_read;
+	u32 window;
+	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
 
-	adapter->crb_win = CRB_HI(*off);
-	writel(adapter->crb_win, (adapter->ahw.pci_base0 + CRB_WINDOW_2M));
-	/*
-	 * Read back value to make sure write has gone through before trying
-	 * to use it.
-	 */
-	win_read = readl(adapter->ahw.pci_base0 + CRB_WINDOW_2M);
-	if (win_read != adapter->crb_win) {
-		printk(KERN_ERR "%s: Written crbwin (0x%x) != "
-				"Read crbwin (0x%x), off=0x%lx\n",
-				__func__, adapter->crb_win, win_read, *off);
+	off -= NETXEN_PCI_CRBSPACE;
+
+	window = CRB_HI(off);
+
+	if (adapter->ahw.crb_win == window)
+		return;
+
+	writel(window, addr);
+	if (readl(addr) != window) {
+		if (printk_ratelimit())
+			dev_warn(&adapter->pdev->dev,
+				"failed to set CRB window to %d off 0x%lx\n",
+				window, off);
 	}
-	*off = (*off & MASK(16)) + CRB_INDIRECT_2M +
-		(ulong)adapter->ahw.pci_base0;
+	adapter->ahw.crb_win = window;
+}
+
+static void __iomem *
+netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter,
+		ulong win_off, void __iomem **mem_ptr)
+{
+	ulong off = win_off;
+	void __iomem *addr;
+	resource_size_t mem_base;
+
+	if (ADDR_IN_WINDOW1(win_off))
+		off = NETXEN_CRB_NORMAL(win_off);
+
+	addr = pci_base_offset(adapter, off);
+	if (addr)
+		return addr;
+
+	if (adapter->ahw.pci_len0 == 0)
+		off -= NETXEN_PCI_CRBSPACE;
+
+	mem_base = pci_resource_start(adapter->pdev, 0);
+	*mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
+	if (*mem_ptr)
+		addr = *mem_ptr + (off & (PAGE_SIZE - 1));
+
+	return addr;
 }
 
 static int
 netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
 {
 	unsigned long flags;
-	void __iomem *addr;
-
-	if (ADDR_IN_WINDOW1(off))
-		addr = NETXEN_CRB_NORMALIZE(adapter, off);
-	else
-		addr = pci_base_offset(adapter, off);
+	void __iomem *addr, *mem_ptr = NULL;
 
-	BUG_ON(!addr);
+	addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+	if (!addr)
+		return -EIO;
 
-	if (ADDR_IN_WINDOW1(off)) {	/* Window 1 */
-		read_lock(&adapter->adapter_lock);
+	if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+		netxen_nic_io_write_128M(adapter, addr, data);
+	} else {        /* Window 0 */
+		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+		netxen_nic_pci_set_crbwindow_128M(adapter, 0);
 		writel(data, addr);
-		read_unlock(&adapter->adapter_lock);
-	} else {		/* Window 0 */
-		write_lock_irqsave(&adapter->adapter_lock, flags);
-		addr = pci_base_offset(adapter, off);
-		netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-		writel(data, addr);
-		netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-		write_unlock_irqrestore(&adapter->adapter_lock, flags);
+		netxen_nic_pci_set_crbwindow_128M(adapter,
+				NETXEN_WINDOW_ONE);
+		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
 	}
 
+	if (mem_ptr)
+		iounmap(mem_ptr);
+
 	return 0;
 }
 
@@ -1218,28 +1232,27 @@ static u32
 netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
 {
 	unsigned long flags;
-	void __iomem *addr;
+	void __iomem *addr, *mem_ptr = NULL;
 	u32 data;
 
-	if (ADDR_IN_WINDOW1(off))
-		addr = NETXEN_CRB_NORMALIZE(adapter, off);
-	else
-		addr = pci_base_offset(adapter, off);
-
-	BUG_ON(!addr);
+	addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+	if (!addr)
+		return -EIO;
 
-	if (ADDR_IN_WINDOW1(off)) {	/* Window 1 */
-		read_lock(&adapter->adapter_lock);
-		data = readl(addr);
-		read_unlock(&adapter->adapter_lock);
-	} else {		/* Window 0 */
-		write_lock_irqsave(&adapter->adapter_lock, flags);
-		netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+	if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+		data = netxen_nic_io_read_128M(adapter, addr);
+	} else {        /* Window 0 */
+		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+		netxen_nic_pci_set_crbwindow_128M(adapter, 0);
 		data = readl(addr);
-		netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-		write_unlock_irqrestore(&adapter->adapter_lock, flags);
+		netxen_nic_pci_set_crbwindow_128M(adapter,
+				NETXEN_WINDOW_ONE);
+		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
 	}
 
+	if (mem_ptr)
+		iounmap(mem_ptr);
+
 	return data;
 }
 
@@ -1248,28 +1261,30 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
 {
 	unsigned long flags;
 	int rv;
+	void __iomem *addr = NULL;
 
-	rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off);
+	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
 
-	if (rv == -1) {
-		printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
-				__func__, off);
-		dump_stack();
-		return -1;
+	if (rv == 0) {
+		writel(data, addr);
+		return 0;
 	}
 
-	if (rv == 1) {
-		write_lock_irqsave(&adapter->adapter_lock, flags);
+	if (rv > 0) {
+		/* indirect access */
+		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
 		crb_win_lock(adapter);
-		netxen_nic_pci_set_crbwindow_2M(adapter, &off);
-		writel(data, (void __iomem *)off);
+		netxen_nic_pci_set_crbwindow_2M(adapter, off);
+		writel(data, addr);
 		crb_win_unlock(adapter);
-		write_unlock_irqrestore(&adapter->adapter_lock, flags);
-	} else
-		writel(data, (void __iomem *)off);
-
+		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+		return 0;
+	}
 
-	return 0;
+	dev_err(&adapter->pdev->dev,
+			"%s: invalid offset: 0x%016lx\n", __func__, off);
+	dump_stack();
+	return -EIO;
 }
 
 static u32
@@ -1278,102 +1293,37 @@ netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
 	unsigned long flags;
 	int rv;
 	u32 data;
+	void __iomem *addr = NULL;
 
-	rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off);
+	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
 
-	if (rv == -1) {
-		printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
-				__func__, off);
-		dump_stack();
-		return -1;
-	}
+	if (rv == 0)
+		return readl(addr);
 
-	if (rv == 1) {
-		write_lock_irqsave(&adapter->adapter_lock, flags);
+	if (rv > 0) {
+		/* indirect access */
+		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
 		crb_win_lock(adapter);
-		netxen_nic_pci_set_crbwindow_2M(adapter, &off);
-		data = readl((void __iomem *)off);
+		netxen_nic_pci_set_crbwindow_2M(adapter, off);
+		data = readl(addr);
 		crb_win_unlock(adapter);
-		write_unlock_irqrestore(&adapter->adapter_lock, flags);
-	} else
-		data = readl((void __iomem *)off);
-
-	return data;
-}
-
-static int netxen_pci_set_window_warning_count;
-
-static unsigned long
-netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
-		unsigned long long addr)
-{
-	void __iomem *offset;
-	int window;
-	unsigned long long	qdr_max;
-	uint8_t func = adapter->ahw.pci_func;
-
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
-		qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2;
-	} else {
-		qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3;
+		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+		return data;
 	}
 
-	if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
-		/* DDR network side */
-		addr -= NETXEN_ADDR_DDR_NET;
-		window = (addr >> 25) & 0x3ff;
-		if (adapter->ahw.ddr_mn_window != window) {
-			adapter->ahw.ddr_mn_window = window;
-			offset = PCI_OFFSET_SECOND_RANGE(adapter,
-				NETXEN_PCIX_PH_REG(PCIE_MN_WINDOW_REG(func)));
-			writel(window, offset);
-			/* MUST make sure window is set before we forge on... */
-			readl(offset);
-		}
-		addr -= (window * NETXEN_WINDOW_ONE);
-		addr += NETXEN_PCI_DDR_NET;
-	} else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
-		addr -= NETXEN_ADDR_OCM0;
-		addr += NETXEN_PCI_OCM0;
-	} else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
-		addr -= NETXEN_ADDR_OCM1;
-		addr += NETXEN_PCI_OCM1;
-	} else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) {
-		/* QDR network side */
-		addr -= NETXEN_ADDR_QDR_NET;
-		window = (addr >> 22) & 0x3f;
-		if (adapter->ahw.qdr_sn_window != window) {
-			adapter->ahw.qdr_sn_window = window;
-			offset = PCI_OFFSET_SECOND_RANGE(adapter,
-				NETXEN_PCIX_PH_REG(PCIE_SN_WINDOW_REG(func)));
-			writel((window << 22), offset);
-			/* MUST make sure window is set before we forge on... */
-			readl(offset);
-		}
-		addr -= (window * 0x400000);
-		addr += NETXEN_PCI_QDR_NET;
-	} else {
-		/*
-		 * peg gdb frequently accesses memory that doesn't exist,
-		 * this limits the chit chat so debugging isn't slowed down.
-		 */
-		if ((netxen_pci_set_window_warning_count++ < 8)
-		    || (netxen_pci_set_window_warning_count % 64 == 0))
-			printk("%s: Warning:netxen_nic_pci_set_window()"
-			       " Unknown address range!\n",
-			       netxen_nic_driver_name);
-		addr = -1UL;
-	}
-	return addr;
+	dev_err(&adapter->pdev->dev,
+			"%s: invalid offset: 0x%016lx\n", __func__, off);
+	dump_stack();
+	return -1;
 }
 
 /* window 1 registers only */
 static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
 		void __iomem *addr, u32 data)
 {
-	read_lock(&adapter->adapter_lock);
+	read_lock(&adapter->ahw.crb_lock);
 	writel(data, addr);
-	read_unlock(&adapter->adapter_lock);
+	read_unlock(&adapter->ahw.crb_lock);
 }
 
 static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
@@ -1381,9 +1331,9 @@ static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
 {
 	u32 val;
 
-	read_lock(&adapter->adapter_lock);
+	read_lock(&adapter->ahw.crb_lock);
 	val = readl(addr);
-	read_unlock(&adapter->adapter_lock);
+	read_unlock(&adapter->ahw.crb_lock);
 
 	return val;
 }
@@ -1403,488 +1353,437 @@ static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
 void __iomem *
 netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
 {
-	ulong off = offset;
+	void __iomem *addr = NULL;
 
 	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
-		if (offset < NETXEN_CRB_PCIX_HOST2 &&
-				offset > NETXEN_CRB_PCIX_HOST)
-			return PCI_OFFSET_SECOND_RANGE(adapter, offset);
-		return NETXEN_CRB_NORMALIZE(adapter, offset);
+		if ((offset < NETXEN_CRB_PCIX_HOST2) &&
+				(offset > NETXEN_CRB_PCIX_HOST))
+			addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
+		else
+			addr = NETXEN_CRB_NORMALIZE(adapter, offset);
+	} else {
+		WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
+					offset, &addr));
 	}
 
-	BUG_ON(netxen_nic_pci_get_crb_addr_2M(adapter, &off));
-	return (void __iomem *)off;
+	return addr;
 }
 
-static unsigned long
-netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
-		unsigned long long addr)
+static int
+netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+		u64 addr, u32 *start)
 {
-	int window;
-	u32 win_read;
-
-	if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
-		/* DDR network side */
-		window = MN_WIN(addr);
-		adapter->ahw.ddr_mn_window = window;
-		NXWR32(adapter, adapter->ahw.mn_win_crb, window);
-		win_read = NXRD32(adapter, adapter->ahw.mn_win_crb);
-		if ((win_read << 17) != window) {
-			printk(KERN_INFO "Written MNwin (0x%x) != "
-				"Read MNwin (0x%x)\n", window, win_read);
-		}
-		addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_DDR_NET;
+	if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+		*start = (addr - NETXEN_ADDR_OCM0  + NETXEN_PCI_OCM0);
+		return 0;
 	} else if (ADDR_IN_RANGE(addr,
-				NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
-		if ((addr & 0x00ff800) == 0xff800) {
-			printk("%s: QM access not handled.\n", __func__);
-			addr = -1UL;
-		}
+				NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+		*start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
+		return 0;
+	}
+
+	return -EIO;
+}
 
+static int
+netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+		u64 addr, u32 *start)
+{
+	u32 window;
+	struct pci_dev *pdev = adapter->pdev;
+
+	if ((addr & 0x00ff800) == 0xff800) {
+		if (printk_ratelimit())
+			dev_warn(&pdev->dev, "QM access not handled\n");
+		return -EIO;
+	}
+
+	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+		window = OCM_WIN_P3P(addr);
+	else
 		window = OCM_WIN(addr);
-		adapter->ahw.ddr_mn_window = window;
-		NXWR32(adapter, adapter->ahw.mn_win_crb, window);
-		win_read = NXRD32(adapter, adapter->ahw.mn_win_crb);
-		if ((win_read >> 7) != window) {
-			printk(KERN_INFO "%s: Written OCMwin (0x%x) != "
-					"Read OCMwin (0x%x)\n",
-					__func__, window, win_read);
-		}
-		addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_OCM0_2M;
 
-	} else if (ADDR_IN_RANGE(addr,
-			NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) {
-		/* QDR network side */
-		window = MS_WIN(addr);
-		adapter->ahw.qdr_sn_window = window;
-		NXWR32(adapter, adapter->ahw.ms_win_crb, window);
-		win_read = NXRD32(adapter, adapter->ahw.ms_win_crb);
-		if (win_read != window) {
-			printk(KERN_INFO "%s: Written MSwin (0x%x) != "
-					"Read MSwin (0x%x)\n",
-					__func__, window, win_read);
-		}
-		addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_QDR_NET;
+	writel(window, adapter->ahw.ocm_win_crb);
+	/* read back to flush */
+	readl(adapter->ahw.ocm_win_crb);
 
-	} else {
-		/*
-		 * peg gdb frequently accesses memory that doesn't exist,
-		 * this limits the chit chat so debugging isn't slowed down.
-		 */
-		if ((netxen_pci_set_window_warning_count++ < 8)
-			|| (netxen_pci_set_window_warning_count%64 == 0)) {
-			printk("%s: Warning:%s Unknown address range!\n",
-					__func__, netxen_nic_driver_name);
+	adapter->ahw.ocm_win = window;
+	*start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+	return 0;
 }
-		addr = -1UL;
+
+static int
+netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
+		u64 *data, int op)
+{
+	void __iomem *addr, *mem_ptr = NULL;
+	resource_size_t mem_base;
+	int ret = -EIO;
+	u32 start;
+
+	spin_lock(&adapter->ahw.mem_lock);
+
+	ret = adapter->pci_set_window(adapter, off, &start);
+	if (ret != 0)
+		goto unlock;
+
+	addr = pci_base_offset(adapter, start);
+	if (addr)
+		goto noremap;
+
+	mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+
+	mem_ptr = ioremap(mem_base, PAGE_SIZE);
+	if (mem_ptr == NULL) {
+		ret = -EIO;
+		goto unlock;
 	}
-	return addr;
+
+	addr = mem_ptr + (start & (PAGE_SIZE - 1));
+
+noremap:
+	if (op == 0)	/* read */
+		*data = readq(addr);
+	else		/* write */
+		writeq(*data, addr);
+
+unlock:
+	spin_unlock(&adapter->ahw.mem_lock);
+
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return ret;
 }
 
 #define MAX_CTL_CHECK   1000
 
 static int
 netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
-		u64 off, void *data, int size)
+		u64 off, u64 data)
 {
-	unsigned long   flags;
-	int	     i, j, ret = 0, loop, sz[2], off0;
-	uint32_t      temp;
-	uint64_t      off8, tmpw, word[2] = {0, 0};
+	int j, ret;
+	u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
 	void __iomem *mem_crb;
 
-	if (size != 8)
+	/* Only 64-bit aligned access */
+	if (off & 7)
 		return -EIO;
 
+	/* P2 has different SIU and MIU test agent base addr */
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
 				NETXEN_ADDR_QDR_NET_MAX_P2)) {
-		mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET);
+		mem_crb = pci_base_offset(adapter,
+				NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+		addr_hi = SIU_TEST_AGT_ADDR_HI;
+		data_lo = SIU_TEST_AGT_WRDATA_LO;
+		data_hi = SIU_TEST_AGT_WRDATA_HI;
+		off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+		off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
 		goto correct;
 	}
 
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
-		mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET);
+		mem_crb = pci_base_offset(adapter,
+				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+		addr_hi = MIU_TEST_AGT_ADDR_HI;
+		data_lo = MIU_TEST_AGT_WRDATA_LO;
+		data_hi = MIU_TEST_AGT_WRDATA_HI;
+		off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+		off_hi = 0;
 		goto correct;
 	}
 
-	return -EIO;
-
-correct:
-	off8 = off & 0xfffffff8;
-	off0 = off & 0x7;
-	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
-	sz[1] = size - sz[0];
-	loop = ((off0 + size - 1) >> 3) + 1;
-
-	if ((size != 8) || (off0 != 0))  {
-		for (i = 0; i < loop; i++) {
-			if (adapter->pci_mem_read(adapter,
-				off8 + (i << 3), &word[i], 8))
-				return -1;
+	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+		ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+		if (adapter->ahw.pci_len0 != 0) {
+			return netxen_nic_pci_mem_access_direct(adapter,
+					off, &data, 1);
 		}
 	}
 
-	switch (size) {
-	case 1:
-		tmpw = *((uint8_t *)data);
-		break;
-	case 2:
-		tmpw = *((uint16_t *)data);
-		break;
-	case 4:
-		tmpw = *((uint32_t *)data);
-		break;
-	case 8:
-	default:
-		tmpw = *((uint64_t *)data);
-		break;
-	}
-	word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
-	word[0] |= tmpw << (off0 * 8);
+	return -EIO;
 
-	if (loop == 2) {
-		word[1] &= ~(~0ULL << (sz[1] * 8));
-		word[1] |= tmpw >> (sz[0] * 8);
+correct:
+	spin_lock(&adapter->ahw.mem_lock);
+	netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+	writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+	writel(off_hi, (mem_crb + addr_hi));
+	writel(data & 0xffffffff, (mem_crb + data_lo));
+	writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
+	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+			(mem_crb + TEST_AGT_CTRL));
+
+	for (j = 0; j < MAX_CTL_CHECK; j++) {
+		temp = readl((mem_crb + TEST_AGT_CTRL));
+		if ((temp & TA_CTL_BUSY) == 0)
+			break;
 	}
 
-	write_lock_irqsave(&adapter->adapter_lock, flags);
-	netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-
-	for (i = 0; i < loop; i++) {
-		writel((uint32_t)(off8 + (i << 3)),
-			(mem_crb+MIU_TEST_AGT_ADDR_LO));
-		writel(0,
-			(mem_crb+MIU_TEST_AGT_ADDR_HI));
-		writel(word[i] & 0xffffffff,
-			(mem_crb+MIU_TEST_AGT_WRDATA_LO));
-		writel((word[i] >> 32) & 0xffffffff,
-			(mem_crb+MIU_TEST_AGT_WRDATA_HI));
-		writel(MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
-			(mem_crb+MIU_TEST_AGT_CTRL));
-		writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
-			(mem_crb+MIU_TEST_AGT_CTRL));
-
-		for (j = 0; j < MAX_CTL_CHECK; j++) {
-			temp = readl(
-			     (mem_crb+MIU_TEST_AGT_CTRL));
-			if ((temp & MIU_TA_CTL_BUSY) == 0)
-				break;
-		}
-
-		if (j >= MAX_CTL_CHECK) {
-			if (printk_ratelimit())
-				dev_err(&adapter->pdev->dev,
+	if (j >= MAX_CTL_CHECK) {
+		if (printk_ratelimit())
+			dev_err(&adapter->pdev->dev,
 					"failed to write through agent\n");
-			ret = -1;
-			break;
-		}
-	}
+		ret = -EIO;
+	} else
+		ret = 0;
 
-	netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-	write_unlock_irqrestore(&adapter->adapter_lock, flags);
+	netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+	spin_unlock(&adapter->ahw.mem_lock);
 	return ret;
 }
 
 static int
 netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
-		u64 off, void *data, int size)
+		u64 off, u64 *data)
 {
-	unsigned long   flags;
-	int	     i, j = 0, k, start, end, loop, sz[2], off0[2];
-	uint32_t      temp;
-	uint64_t      off8, val, word[2] = {0, 0};
+	int j, ret;
+	u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+	u64 val;
 	void __iomem *mem_crb;
 
-	if (size != 8)
+	/* Only 64-bit aligned access */
+	if (off & 7)
 		return -EIO;
 
+	/* P2 has different SIU and MIU test agent base addr */
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
 				NETXEN_ADDR_QDR_NET_MAX_P2)) {
-		mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET);
+		mem_crb = pci_base_offset(adapter,
+				NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+		addr_hi = SIU_TEST_AGT_ADDR_HI;
+		data_lo = SIU_TEST_AGT_RDDATA_LO;
+		data_hi = SIU_TEST_AGT_RDDATA_HI;
+		off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+		off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
 		goto correct;
 	}
 
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
-		mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET);
+		mem_crb = pci_base_offset(adapter,
+				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+		addr_hi = MIU_TEST_AGT_ADDR_HI;
+		data_lo = MIU_TEST_AGT_RDDATA_LO;
+		data_hi = MIU_TEST_AGT_RDDATA_HI;
+		off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+		off_hi = 0;
 		goto correct;
 	}
 
+	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+		ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+		if (adapter->ahw.pci_len0 != 0) {
+			return netxen_nic_pci_mem_access_direct(adapter,
+					off, data, 0);
+		}
+	}
+
 	return -EIO;
 
 correct:
-	off8 = off & 0xfffffff8;
-	off0[0] = off & 0x7;
-	off0[1] = 0;
-	sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
-	sz[1] = size - sz[0];
-	loop = ((off0[0] + size - 1) >> 3) + 1;
-
-	write_lock_irqsave(&adapter->adapter_lock, flags);
-	netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-
-	for (i = 0; i < loop; i++) {
-		writel((uint32_t)(off8 + (i << 3)),
-			(mem_crb+MIU_TEST_AGT_ADDR_LO));
-		writel(0,
-			(mem_crb+MIU_TEST_AGT_ADDR_HI));
-		writel(MIU_TA_CTL_ENABLE,
-			(mem_crb+MIU_TEST_AGT_CTRL));
-		writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE,
-			(mem_crb+MIU_TEST_AGT_CTRL));
+	spin_lock(&adapter->ahw.mem_lock);
+	netxen_nic_pci_set_crbwindow_128M(adapter, 0);
 
-		for (j = 0; j < MAX_CTL_CHECK; j++) {
-			temp = readl(
-			      (mem_crb+MIU_TEST_AGT_CTRL));
-			if ((temp & MIU_TA_CTL_BUSY) == 0)
-				break;
-		}
+	writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+	writel(off_hi, (mem_crb + addr_hi));
+	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+	writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
 
-		if (j >= MAX_CTL_CHECK) {
-			if (printk_ratelimit())
-				dev_err(&adapter->pdev->dev,
-					"failed to read through agent\n");
+	for (j = 0; j < MAX_CTL_CHECK; j++) {
+		temp = readl(mem_crb + TEST_AGT_CTRL);
+		if ((temp & TA_CTL_BUSY) == 0)
 			break;
-		}
-
-		start = off0[i] >> 2;
-		end   = (off0[i] + sz[i] - 1) >> 2;
-		for (k = start; k <= end; k++) {
-			word[i] |= ((uint64_t) readl(
-				    (mem_crb +
-				    MIU_TEST_AGT_RDDATA(k))) << (32*k));
-		}
 	}
 
-	netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-	write_unlock_irqrestore(&adapter->adapter_lock, flags);
-
-	if (j >= MAX_CTL_CHECK)
-		return -1;
-
-	if (sz[0] == 8) {
-		val = word[0];
+	if (j >= MAX_CTL_CHECK) {
+		if (printk_ratelimit())
+			dev_err(&adapter->pdev->dev,
+					"failed to read through agent\n");
+		ret = -EIO;
 	} else {
-		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
-			((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
-	}
 
-	switch (size) {
-	case 1:
-		*(uint8_t  *)data = val;
-		break;
-	case 2:
-		*(uint16_t *)data = val;
-		break;
-	case 4:
-		*(uint32_t *)data = val;
-		break;
-	case 8:
-		*(uint64_t *)data = val;
-		break;
+		temp = readl(mem_crb + data_hi);
+		val = ((u64)temp << 32);
+		val |= readl(mem_crb + data_lo);
+		*data = val;
+		ret = 0;
 	}
-	return 0;
+
+	netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+	spin_unlock(&adapter->ahw.mem_lock);
+
+	return ret;
 }
 
 static int
 netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
-		u64 off, void *data, int size)
+		u64 off, u64 data)
 {
-	int i, j, ret = 0, loop, sz[2], off0;
-	uint32_t temp;
-	uint64_t off8, tmpw, word[2] = {0, 0};
+	int i, j, ret;
+	u32 temp, off8;
+	u64 stride;
 	void __iomem *mem_crb;
 
-	if (size != 8)
+	/* Only 64-bit aligned access */
+	if (off & 7)
 		return -EIO;
 
+	/* P3 onward, test agent base for MIU and SIU is same */
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
 				NETXEN_ADDR_QDR_NET_MAX_P3)) {
-		mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET);
+		mem_crb = netxen_get_ioaddr(adapter,
+				NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
 		goto correct;
 	}
 
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
-		mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET);
+		mem_crb = netxen_get_ioaddr(adapter,
+				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
 		goto correct;
 	}
 
+	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
+		return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
+
 	return -EIO;
 
 correct:
-	off8 = off & 0xfffffff8;
-	off0 = off & 0x7;
-	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
-	sz[1] = size - sz[0];
-	loop = ((off0 + size - 1) >> 3) + 1;
-
-	if ((size != 8) || (off0 != 0)) {
-		for (i = 0; i < loop; i++) {
-			if (adapter->pci_mem_read(adapter,
-					off8 + (i << 3), &word[i], 8))
-				return -1;
-		}
-	}
-
-	switch (size) {
-	case 1:
-		tmpw = *((uint8_t *)data);
-		break;
-	case 2:
-		tmpw = *((uint16_t *)data);
-		break;
-	case 4:
-		tmpw = *((uint32_t *)data);
-		break;
-	case 8:
-	default:
-		tmpw = *((uint64_t *)data);
-	break;
-	}
+	stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
 
-	word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
-	word[0] |= tmpw << (off0 * 8);
+	off8 = off & ~(stride-1);
 
-	if (loop == 2) {
-		word[1] &= ~(~0ULL << (sz[1] * 8));
-		word[1] |= tmpw >> (sz[0] * 8);
-	}
+	spin_lock(&adapter->ahw.mem_lock);
 
-	/*
-	 * don't lock here - write_wx gets the lock if each time
-	 * write_lock_irqsave(&adapter->adapter_lock, flags);
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-	 */
+	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
 
-	for (i = 0; i < loop; i++) {
-		writel(off8 + (i << 3), mem_crb+MIU_TEST_AGT_ADDR_LO);
-		writel(0, mem_crb+MIU_TEST_AGT_ADDR_HI);
-		writel(word[i] & 0xffffffff, mem_crb+MIU_TEST_AGT_WRDATA_LO);
-		writel((word[i] >> 32) & 0xffffffff,
-				mem_crb+MIU_TEST_AGT_WRDATA_HI);
-		writel((MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE),
-				mem_crb+MIU_TEST_AGT_CTRL);
-		writel(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE,
-				mem_crb+MIU_TEST_AGT_CTRL);
+	i = 0;
+	if (stride == 16) {
+		writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+		writel((TA_CTL_START | TA_CTL_ENABLE),
+				(mem_crb + TEST_AGT_CTRL));
 
 		for (j = 0; j < MAX_CTL_CHECK; j++) {
-			temp = readl(mem_crb + MIU_TEST_AGT_CTRL);
-			if ((temp & MIU_TA_CTL_BUSY) == 0)
+			temp = readl(mem_crb + TEST_AGT_CTRL);
+			if ((temp & TA_CTL_BUSY) == 0)
 				break;
 		}
 
 		if (j >= MAX_CTL_CHECK) {
-			if (printk_ratelimit())
-				dev_err(&adapter->pdev->dev,
-					"failed to write through agent\n");
-			ret = -1;
-			break;
+			ret = -EIO;
+			goto done;
 		}
+
+		i = (off & 0xf) ? 0 : 2;
+		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+				mem_crb + MIU_TEST_AGT_WRDATA(i));
+		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+				mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+		i = (off & 0xf) ? 2 : 0;
 	}
 
-	/*
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-	 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
-	 */
+	writel(data & 0xffffffff,
+			mem_crb + MIU_TEST_AGT_WRDATA(i));
+	writel((data >> 32) & 0xffffffff,
+			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+			(mem_crb + TEST_AGT_CTRL));
+
+	for (j = 0; j < MAX_CTL_CHECK; j++) {
+		temp = readl(mem_crb + TEST_AGT_CTRL);
+		if ((temp & TA_CTL_BUSY) == 0)
+			break;
+	}
+
+	if (j >= MAX_CTL_CHECK) {
+		if (printk_ratelimit())
+			dev_err(&adapter->pdev->dev,
+					"failed to write through agent\n");
+		ret = -EIO;
+	} else
+		ret = 0;
+
+done:
+	spin_unlock(&adapter->ahw.mem_lock);
+
 	return ret;
 }
 
 static int
 netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
-		u64 off, void *data, int size)
+		u64 off, u64 *data)
 {
-	int i, j = 0, k, start, end, loop, sz[2], off0[2];
-	uint32_t      temp;
-	uint64_t      off8, val, word[2] = {0, 0};
+	int j, ret;
+	u32 temp, off8;
+	u64 val, stride;
 	void __iomem *mem_crb;
 
-	if (size != 8)
+	/* Only 64-bit aligned access */
+	if (off & 7)
 		return -EIO;
 
+	/* P3 onward, test agent base for MIU and SIU is same */
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
 				NETXEN_ADDR_QDR_NET_MAX_P3)) {
-		mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET);
+		mem_crb = netxen_get_ioaddr(adapter,
+				NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
 		goto correct;
 	}
 
 	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
-		mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET);
+		mem_crb = netxen_get_ioaddr(adapter,
+				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
 		goto correct;
 	}
 
+	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+		return netxen_nic_pci_mem_access_direct(adapter,
+				off, data, 0);
+	}
+
 	return -EIO;
 
 correct:
-	off8 = off & 0xfffffff8;
-	off0[0] = off & 0x7;
-	off0[1] = 0;
-	sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
-	sz[1] = size - sz[0];
-	loop = ((off0[0] + size - 1) >> 3) + 1;
+	stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
 
-	/*
-	 * don't lock here - write_wx gets the lock if each time
-	 * write_lock_irqsave(&adapter->adapter_lock, flags);
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-	 */
+	off8 = off & ~(stride-1);
 
-	for (i = 0; i < loop; i++) {
-		writel(off8 + (i << 3), mem_crb + MIU_TEST_AGT_ADDR_LO);
-		writel(0, mem_crb + MIU_TEST_AGT_ADDR_HI);
-		writel(MIU_TA_CTL_ENABLE, mem_crb + MIU_TEST_AGT_CTRL);
-		writel(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE,
-				mem_crb + MIU_TEST_AGT_CTRL);
+	spin_lock(&adapter->ahw.mem_lock);
 
-		for (j = 0; j < MAX_CTL_CHECK; j++) {
-			temp = readl(mem_crb + MIU_TEST_AGT_CTRL);
-			if ((temp & MIU_TA_CTL_BUSY) == 0)
-				break;
-		}
+	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
 
-		if (j >= MAX_CTL_CHECK) {
-			if (printk_ratelimit())
-				dev_err(&adapter->pdev->dev,
-					"failed to read through agent\n");
+	for (j = 0; j < MAX_CTL_CHECK; j++) {
+		temp = readl(mem_crb + TEST_AGT_CTRL);
+		if ((temp & TA_CTL_BUSY) == 0)
 			break;
-		}
-
-		start = off0[i] >> 2;
-		end   = (off0[i] + sz[i] - 1) >> 2;
-		for (k = start; k <= end; k++) {
-			temp = readl(mem_crb + MIU_TEST_AGT_RDDATA(k));
-			word[i] |= ((uint64_t)temp << (32 * k));
-		}
 	}
 
-	/*
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-	 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
-	 */
-
-	if (j >= MAX_CTL_CHECK)
-		return -1;
-
-	if (sz[0] == 8) {
-		val = word[0];
+	if (j >= MAX_CTL_CHECK) {
+		if (printk_ratelimit())
+			dev_err(&adapter->pdev->dev,
+					"failed to read through agent\n");
+		ret = -EIO;
 	} else {
-		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
-		((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
-	}
+		off8 = MIU_TEST_AGT_RDDATA_LO;
+		if ((stride == 16) && (off & 0xf))
+			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
 
-	switch (size) {
-	case 1:
-		*(uint8_t  *)data = val;
-		break;
-	case 2:
-		*(uint16_t *)data = val;
-		break;
-	case 4:
-		*(uint32_t *)data = val;
-		break;
-	case 8:
-		*(uint64_t *)data = val;
-		break;
+		temp = readl(mem_crb + off8 + 4);
+		val = (u64)temp << 32;
+		val |= readl(mem_crb + off8);
+		*data = val;
+		ret = 0;
 	}
-	return 0;
+
+	spin_unlock(&adapter->ahw.mem_lock);
+
+	return ret;
 }
 
 void
@@ -2037,10 +1936,10 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
 			return;
 		}
 
-		if (adapter->phy_read
-		    && adapter->phy_read(adapter,
-			     NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-			     &status) == 0) {
+		if (adapter->phy_read &&
+		    adapter->phy_read(adapter,
+				      NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+				      &status) == 0) {
 			if (netxen_get_phy_link(status)) {
 				switch (netxen_get_phy_speed(status)) {
 				case 0:
@@ -2067,10 +1966,10 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
 					adapter->link_duplex = -1;
 					break;
 				}
-				if (adapter->phy_read
-				    && adapter->phy_read(adapter,
-					     NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-					     &autoneg) != 0)
+				if (adapter->phy_read &&
+				    adapter->phy_read(adapter,
+						      NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+						      &autoneg) != 0)
 					adapter->link_autoneg = autoneg;
 			} else
 				goto link_down;
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 8a0904368e08..80a667460514 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -46,6 +46,7 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
 static void
 netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 		struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
 
 static void crb_addr_transform_setup(void)
 {
@@ -437,7 +438,7 @@ int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
 #define NETXEN_BOARDNUM 		0x400c
 #define NETXEN_CHIPNUM			0x4010
 
-int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
+int netxen_pinit_from_rom(struct netxen_adapter *adapter)
 {
 	int addr, val;
 	int i, n, init_delay = 0;
@@ -450,21 +451,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 	NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
 	netxen_rom_unlock(adapter);
 
-	if (verbose) {
-		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
-			printk("P2 ROM board type: 0x%08x\n", val);
-		else
-			printk("Could not read board type\n");
-		if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
-			printk("P2 ROM board  num: 0x%08x\n", val);
-		else
-			printk("Could not read board number\n");
-		if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
-			printk("P2 ROM chip   num: 0x%08x\n", val);
-		else
-			printk("Could not read chip number\n");
-	}
-
 	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
 		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
 			(n != 0xcafecafe) ||
@@ -486,11 +472,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 		n &= ~0x80000000;
 	}
 
-	if (n < 1024) {
-		if (verbose)
-			printk(KERN_DEBUG "%s: %d CRB init values found"
-			       " in ROM.\n", netxen_nic_driver_name, n);
-	} else {
+	if (n >= 1024) {
 		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
 		       " initialized.\n", __func__, n);
 		return -EIO;
@@ -502,6 +484,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 				netxen_nic_driver_name);
 		return -ENOMEM;
 	}
+
 	for (i = 0; i < n; i++) {
 		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
 		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
@@ -512,11 +495,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 		buf[i].addr = addr;
 		buf[i].data = val;
 
-		if (verbose)
-			printk(KERN_DEBUG "%s: PCI:     0x%08x == 0x%08x\n",
-				netxen_nic_driver_name,
-				(u32)netxen_decode_crb_addr(addr), val);
 	}
+
 	for (i = 0; i < n; i++) {
 
 		off = netxen_decode_crb_addr(buf[i].addr);
@@ -526,6 +506,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 			continue;
 		}
 		off += NETXEN_PCI_CRBSPACE;
+
+		if (off & 1)
+			continue;
+
 		/* skipping cold reboot MAGIC */
 		if (off == NETXEN_CAM_RAM(0x1fc))
 			continue;
@@ -546,7 +530,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 				continue;
 			if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
 				continue;
-			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
+			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
+				!NX_IS_REVISION_P3P(adapter->ahw.revision_id))
 				buf[i].data = 0x1020;
 			/* skip the function enable register */
 			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
@@ -607,6 +592,172 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 	return 0;
 }
 
+static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
+{
+	uint32_t i;
+	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+	__le32 entries = cpu_to_le32(directory->num_entries);
+
+	for (i = 0; i < entries; i++) {
+
+		__le32 offs = cpu_to_le32(directory->findex) +
+				(i * cpu_to_le32(directory->entry_size));
+		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+		if (tab_type == section)
+			return (struct uni_table_desc *) &unirom[offs];
+	}
+
+	return NULL;
+}
+
+static int
+nx_set_product_offs(struct netxen_adapter *adapter)
+{
+	struct uni_table_desc *ptab_descr;
+	const u8 *unirom = adapter->fw->data;
+	uint32_t i;
+	__le32 entries;
+
+	ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
+	if (ptab_descr == NULL)
+		return -1;
+
+	entries = cpu_to_le32(ptab_descr->num_entries);
+
+	for (i = 0; i < entries; i++) {
+
+		__le32 flags, file_chiprev, offs;
+		u8 chiprev = adapter->ahw.revision_id;
+		int mn_present = netxen_p3_has_mn(adapter);
+		uint32_t flagbit;
+
+		offs = cpu_to_le32(ptab_descr->findex) +
+				(i * cpu_to_le32(ptab_descr->entry_size));
+		flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
+		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+							NX_UNI_CHIP_REV_OFF));
+
+		flagbit = mn_present ? 1 : 2;
+
+		if ((chiprev == file_chiprev) &&
+					((1ULL << flagbit) & flags)) {
+			adapter->file_prd_off = offs;
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+
+static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
+			u32 section, u32 idx_offset)
+{
+	const u8 *unirom = adapter->fw->data;
+	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+								idx_offset));
+	struct uni_table_desc *tab_desc;
+	__le32 offs;
+
+	tab_desc = nx_get_table_desc(unirom, section);
+
+	if (tab_desc == NULL)
+		return NULL;
+
+	offs = cpu_to_le32(tab_desc->findex) +
+			(cpu_to_le32(tab_desc->entry_size) * idx);
+
+	return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+nx_get_bootld_offs(struct netxen_adapter *adapter)
+{
+	u32 offs = NETXEN_BOOTLD_START;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+		offs = cpu_to_le32((nx_get_data_desc(adapter,
+					NX_UNI_DIR_SECT_BOOTLD,
+					NX_UNI_BOOTLD_IDX_OFF))->findex);
+
+	return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+nx_get_fw_offs(struct netxen_adapter *adapter)
+{
+	u32 offs = NETXEN_IMAGE_START;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+		offs = cpu_to_le32((nx_get_data_desc(adapter,
+					NX_UNI_DIR_SECT_FW,
+					NX_UNI_FIRMWARE_IDX_OFF))->findex);
+
+	return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+nx_get_fw_size(struct netxen_adapter *adapter)
+{
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+		return cpu_to_le32((nx_get_data_desc(adapter,
+					NX_UNI_DIR_SECT_FW,
+					NX_UNI_FIRMWARE_IDX_OFF))->size);
+	else
+		return cpu_to_le32(
+				*(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
+}
+
+static __le32
+nx_get_fw_version(struct netxen_adapter *adapter)
+{
+	struct uni_data_desc *fw_data_desc;
+	const struct firmware *fw = adapter->fw;
+	__le32 major, minor, sub;
+	const u8 *ver_str;
+	int i, ret = 0;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+
+		fw_data_desc = nx_get_data_desc(adapter,
+				NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+				cpu_to_le32(fw_data_desc->size) - 17;
+
+		for (i = 0; i < 12; i++) {
+			if (!strncmp(&ver_str[i], "REV=", 4)) {
+				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+							&major, &minor, &sub);
+				break;
+			}
+		}
+
+		if (ret != 3)
+			return 0;
+
+		return major + (minor << 8) + (sub << 16);
+
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+	const struct firmware *fw = adapter->fw;
+	__le32 bios_ver, prd_off = adapter->file_prd_off;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+						+ NX_UNI_BIOS_VERSION_OFF));
+		return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+							(bios_ver >> 24);
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+
+}
+
 int
 netxen_need_fw_reset(struct netxen_adapter *adapter)
 {
@@ -646,9 +797,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
 	/* check if we have got newer or different file firmware */
 	if (adapter->fw) {
 
-		const struct firmware *fw = adapter->fw;
+		val = nx_get_fw_version(adapter);
 
-		val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
 		version = NETXEN_DECODE_VERSION(val);
 
 		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
@@ -658,7 +808,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
 		if (version > NETXEN_VERSION_CODE(major, minor, build))
 			return 1;
 
-		if (version == NETXEN_VERSION_CODE(major, minor, build)) {
+		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
 
 			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
 			fw_type = (val & 0x4) ?
@@ -673,7 +824,11 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
 }
 
 static char *fw_name[] = {
-	"nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
+	NX_P2_MN_ROMIMAGE_NAME,
+	NX_P3_CT_ROMIMAGE_NAME,
+	NX_P3_MN_ROMIMAGE_NAME,
+	NX_UNIFIED_ROMIMAGE_NAME,
+	NX_FLASH_ROMIMAGE_NAME,
 };
 
 int
@@ -695,26 +850,28 @@ netxen_load_firmware(struct netxen_adapter *adapter)
 
 		size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
 
-		ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START];
+		ptr64 = (u64 *)nx_get_bootld_offs(adapter);
 		flashaddr = NETXEN_BOOTLD_START;
 
 		for (i = 0; i < size; i++) {
 			data = cpu_to_le64(ptr64[i]);
-			adapter->pci_mem_write(adapter, flashaddr, &data, 8);
+
+			if (adapter->pci_mem_write(adapter, flashaddr, data))
+				return -EIO;
+
 			flashaddr += 8;
 		}
 
-		size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET];
-		size = (__force u32)cpu_to_le32(size) / 8;
+		size = (__force u32)nx_get_fw_size(adapter) / 8;
 
-		ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START];
+		ptr64 = (u64 *)nx_get_fw_offs(adapter);
 		flashaddr = NETXEN_IMAGE_START;
 
 		for (i = 0; i < size; i++) {
 			data = cpu_to_le64(ptr64[i]);
 
 			if (adapter->pci_mem_write(adapter,
-						flashaddr, &data, 8))
+						flashaddr, data))
 				return -EIO;
 
 			flashaddr += 8;
@@ -728,17 +885,17 @@ netxen_load_firmware(struct netxen_adapter *adapter)
 
 		for (i = 0; i < size; i++) {
 			if (netxen_rom_fast_read(adapter,
-					flashaddr, &lo) != 0)
+					flashaddr, (int *)&lo) != 0)
 				return -EIO;
 			if (netxen_rom_fast_read(adapter,
-					flashaddr + 4, &hi) != 0)
+					flashaddr + 4, (int *)&hi) != 0)
 				return -EIO;
 
 			/* hi, lo are already in host endian byteorder */
 			data = (((u64)hi << 32) | lo);
 
 			if (adapter->pci_mem_write(adapter,
-						flashaddr, &data, 8))
+						flashaddr, data))
 				return -EIO;
 
 			flashaddr += 8;
@@ -746,7 +903,10 @@ netxen_load_firmware(struct netxen_adapter *adapter)
 	}
 	msleep(1);
 
-	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+		NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
+		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
+	} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
 		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
 	else {
 		NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
@@ -757,21 +917,31 @@ netxen_load_firmware(struct netxen_adapter *adapter)
 }
 
 static int
-netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
+netxen_validate_firmware(struct netxen_adapter *adapter)
 {
 	__le32 val;
-	u32 ver, min_ver, bios;
+	u32 ver, min_ver, bios, min_size;
 	struct pci_dev *pdev = adapter->pdev;
 	const struct firmware *fw = adapter->fw;
+	u8 fw_type = adapter->fw_type;
 
-	if (fw->size < NX_FW_MIN_SIZE)
-		return -EINVAL;
+	if (fw_type == NX_UNIFIED_ROMIMAGE) {
+		if (nx_set_product_offs(adapter))
+			return -EINVAL;
+
+		min_size = NX_UNI_FW_MIN_SIZE;
+	} else {
+		val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+		if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+			return -EINVAL;
+
+		min_size = NX_FW_MIN_SIZE;
+	}
 
-	val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
-	if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+	if (fw->size < min_size)
 		return -EINVAL;
 
-	val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+	val = nx_get_fw_version(adapter);
 
 	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
 		min_ver = NETXEN_VERSION_CODE(4, 0, 216);
@@ -783,15 +953,15 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
 	if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
 		dev_err(&pdev->dev,
 				"%s: firmware version %d.%d.%d unsupported\n",
-				fwname, _major(ver), _minor(ver), _build(ver));
+		fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
 		return -EINVAL;
 	}
 
-	val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+	val = nx_get_bios_version(adapter);
 	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
 	if ((__force u32)val != bios) {
 		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
-				fwname);
+				fw_name[fw_type]);
 		return -EINVAL;
 	}
 
@@ -802,7 +972,7 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
 	val = NETXEN_DECODE_VERSION(val);
 	if (val > ver) {
 		dev_info(&pdev->dev, "%s: firmware is older than flash\n",
-				fwname);
+				fw_name[fw_type]);
 		return -EINVAL;
 	}
 
@@ -810,6 +980,41 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
 	return 0;
 }
 
+static void
+nx_get_next_fwtype(struct netxen_adapter *adapter)
+{
+	u8 fw_type;
+
+	switch (adapter->fw_type) {
+	case NX_UNKNOWN_ROMIMAGE:
+		fw_type = NX_UNIFIED_ROMIMAGE;
+		break;
+
+	case NX_UNIFIED_ROMIMAGE:
+		if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+			fw_type = NX_FLASH_ROMIMAGE;
+		else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+			fw_type = NX_P2_MN_ROMIMAGE;
+		else if (netxen_p3_has_mn(adapter))
+			fw_type = NX_P3_MN_ROMIMAGE;
+		else
+			fw_type = NX_P3_CT_ROMIMAGE;
+		break;
+
+	case NX_P3_MN_ROMIMAGE:
+		fw_type = NX_P3_CT_ROMIMAGE;
+		break;
+
+	case NX_P2_MN_ROMIMAGE:
+	case NX_P3_CT_ROMIMAGE:
+	default:
+		fw_type = NX_FLASH_ROMIMAGE;
+		break;
+	}
+
+	adapter->fw_type = fw_type;
+}
+
 static int
 netxen_p3_has_mn(struct netxen_adapter *adapter)
 {
@@ -831,49 +1036,29 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
 
 void netxen_request_firmware(struct netxen_adapter *adapter)
 {
-	u8 fw_type;
 	struct pci_dev *pdev = adapter->pdev;
 	int rc = 0;
 
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
-		fw_type = NX_P2_MN_ROMIMAGE;
-		goto request_fw;
-	}
+	adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
 
-	fw_type = netxen_p3_has_mn(adapter) ?
-		NX_P3_MN_ROMIMAGE : NX_P3_CT_ROMIMAGE;
+next:
+	nx_get_next_fwtype(adapter);
 
-request_fw:
-	rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
-	if (rc != 0) {
-		if (fw_type == NX_P3_MN_ROMIMAGE) {
-			msleep(1);
-			fw_type = NX_P3_CT_ROMIMAGE;
-			goto request_fw;
-		}
-
-		fw_type = NX_FLASH_ROMIMAGE;
+	if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
 		adapter->fw = NULL;
-		goto done;
-	}
-
-	rc = netxen_validate_firmware(adapter, fw_name[fw_type]);
-	if (rc != 0) {
-		release_firmware(adapter->fw);
-
-		if (fw_type == NX_P3_MN_ROMIMAGE) {
+	} else {
+		rc = request_firmware(&adapter->fw,
+				fw_name[adapter->fw_type], &pdev->dev);
+		if (rc != 0)
+			goto next;
+
+		rc = netxen_validate_firmware(adapter);
+		if (rc != 0) {
+			release_firmware(adapter->fw);
 			msleep(1);
-			fw_type = NX_P3_CT_ROMIMAGE;
-			goto request_fw;
+			goto next;
 		}
-
-		fw_type = NX_FLASH_ROMIMAGE;
-		adapter->fw = NULL;
-		goto done;
 	}
-
-done:
-	adapter->fw_type = fw_type;
 }
 
 
@@ -1508,10 +1693,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
 					      (rds_ring->num_desc - 1)));
 			netxen_set_msg_ctxid(msg, adapter->portnum);
 			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
-			read_lock(&adapter->adapter_lock);
-			writel(msg, DB_NORMALIZE(adapter,
-					    NETXEN_RCV_PRODUCER_OFFSET));
-			read_unlock(&adapter->adapter_lock);
+			NXWRIO(adapter, DB_NORMALIZE(adapter,
+					NETXEN_RCV_PRODUCER_OFFSET), msg);
 		}
 	}
 }
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 3bf78dbfbf0f..e5d187fce51b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -34,13 +34,18 @@
 #include <net/ip.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
+#include <linux/sysfs.h>
 
-MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
+MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
+MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
 
 char netxen_nic_driver_name[] = "netxen_nic";
-static char netxen_nic_driver_string[] = "NetXen Network Driver version "
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
     NETXEN_NIC_LINUX_VERSIONID;
 
 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
@@ -52,7 +57,8 @@ static int use_msi = 1;
 
 static int use_msi_x = 1;
 
-/* Local functions to NetXen NIC driver */
+static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
+
 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
 		const struct pci_device_id *ent);
 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
@@ -73,6 +79,8 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
 
 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
 
 static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
 static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -609,14 +617,12 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 	 * Set the CRB window to invalid. If any register in window 0 is
 	 * accessed it should set the window to 0 and then reset it to 1.
 	 */
-	adapter->curr_window = 255;
-	adapter->ahw.qdr_sn_window = -1;
-	adapter->ahw.ddr_mn_window = -1;
+	adapter->ahw.crb_win = -1;
+	adapter->ahw.ocm_win = -1;
 
 	/* remap phys address */
 	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
 	mem_len = pci_resource_len(pdev, 0);
-	pci_len0 = 0;
 
 	/* 128 Meg of memory */
 	if (mem_len == NETXEN_PCI_128MB_SIZE) {
@@ -625,6 +631,7 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 				SECOND_PAGE_GROUP_SIZE);
 		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
 				THIRD_PAGE_GROUP_SIZE);
+		pci_len0 = FIRST_PAGE_GROUP_SIZE;
 	} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
 		mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
 		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
@@ -637,19 +644,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 			return -EIO;
 		}
 		pci_len0 = mem_len;
-
-		adapter->ahw.ddr_mn_window = 0;
-		adapter->ahw.qdr_sn_window = 0;
-
-		adapter->ahw.mn_win_crb = NETXEN_PCI_CRBSPACE +
-			0x100000 + PCIX_MN_WINDOW + (pci_func * 0x20);
-		adapter->ahw.ms_win_crb = NETXEN_PCI_CRBSPACE +
-			0x100000 + PCIX_SN_WINDOW;
-		if (pci_func < 4)
-			adapter->ahw.ms_win_crb += (pci_func * 0x20);
-		else
-			adapter->ahw.ms_win_crb +=
-					0xA0 + ((pci_func - 4) * 0x10);
 	} else {
 		return -EIO;
 	}
@@ -663,6 +657,15 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 	adapter->ahw.pci_base1 = mem_ptr1;
 	adapter->ahw.pci_base2 = mem_ptr2;
 
+	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+			NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+	} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+			NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
+	}
+
 	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
 		goto skip_doorbell;
 
@@ -727,7 +730,8 @@ netxen_check_options(struct netxen_adapter *adapter)
 	if (adapter->portnum == 0) {
 		get_brd_name_by_type(adapter->ahw.board_type, brd_name);
 
-		printk(KERN_INFO "NetXen %s Board S/N %s  Chip rev 0x%x\n",
+		pr_info("%s: %s Board S/N %s  Chip rev 0x%x\n",
+				module_name(THIS_MODULE),
 				brd_name, serial_num, adapter->ahw.revision_id);
 	}
 
@@ -815,11 +819,11 @@ netxen_start_firmware(struct netxen_adapter *adapter)
 	if (err < 0)
 		goto err_out;
 	if (err == 0)
-		goto ready;
+		goto wait_init;
 
 	if (first_boot != 0x55555555) {
 		NXWR32(adapter, CRB_CMDPEG_STATE, 0);
-		netxen_pinit_from_rom(adapter, 0);
+		netxen_pinit_from_rom(adapter);
 		msleep(1);
 	}
 
@@ -858,9 +862,6 @@ netxen_start_firmware(struct netxen_adapter *adapter)
 		| (_NETXEN_NIC_LINUX_SUBVERSION);
 	NXWR32(adapter, CRB_DRIVER_VERSION, val);
 
-ready:
-	NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
-
 wait_init:
 	/* Handshake with the card before we register the devices. */
 	err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
@@ -869,6 +870,8 @@ wait_init:
 		goto err_out;
 	}
 
+	NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
+
 	nx_update_dma_mask(adapter);
 
 	netxen_check_options(adapter);
@@ -1209,16 +1212,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int pci_func_id = PCI_FUNC(pdev->devfn);
 	uint8_t revision_id;
 
-	if (pdev->class != 0x020000) {
-		printk(KERN_DEBUG "NetXen function %d, class %x will not "
-				"be enabled.\n",pci_func_id, pdev->class);
-		return -ENODEV;
-	}
-
 	if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
-		printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
+		pr_warning("%s: chip revisions between 0x%x-0x%x"
 				"will not be enabled.\n",
-				NX_P3_A0, NX_P3_B1);
+				module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
 		return -ENODEV;
 	}
 
@@ -1252,7 +1249,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	revision_id = pdev->revision;
 	adapter->ahw.revision_id = revision_id;
 
-	rwlock_init(&adapter->adapter_lock);
+	rwlock_init(&adapter->ahw.crb_lock);
+	spin_lock_init(&adapter->ahw.mem_lock);
+
 	spin_lock_init(&adapter->tx_clean_lock);
 	INIT_LIST_HEAD(&adapter->mac_list);
 
@@ -1282,7 +1281,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	err = netxen_start_firmware(adapter);
 	if (err)
-		goto err_out_iounmap;
+		goto err_out_decr_ref;
 
 	/*
 	 * See if the firmware gave us a virtual-physical port mapping.
@@ -1317,6 +1316,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		break;
 	}
 
+	netxen_create_diag_entries(adapter);
+
 	return 0;
 
 err_out_disable_msi:
@@ -1324,6 +1325,7 @@ err_out_disable_msi:
 
 	netxen_free_dummy_dma(adapter);
 
+err_out_decr_ref:
 	nx_decr_dev_ref_cnt(adapter);
 
 err_out_iounmap:
@@ -1369,6 +1371,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 
 	netxen_teardown_intr(adapter);
 
+	netxen_remove_diag_entries(adapter);
+
 	netxen_cleanup_pci_map(adapter);
 
 	netxen_release_firmware(adapter);
@@ -1449,7 +1453,8 @@ netxen_nic_resume(struct pci_dev *pdev)
 	if (err)
 		return err;
 
-	adapter->curr_window = 255;
+	adapter->ahw.crb_win = -1;
+	adapter->ahw.ocm_win = -1;
 
 	err = netxen_start_firmware(adapter);
 	if (err) {
@@ -1927,7 +1932,7 @@ request_reset:
 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
-	struct net_device_stats *stats = &adapter->net_stats;
+	struct net_device_stats *stats = &netdev->stats;
 
 	memset(stats, 0, sizeof(*stats));
 
@@ -2184,14 +2189,13 @@ netxen_fwinit_work(struct work_struct *work)
 					netxen_fwinit_work, 2 * FW_POLL_DELAY);
 			return;
 		}
-		break;
 
 	case NX_DEV_FAILED:
 	default:
+		nx_incr_dev_ref_cnt(adapter);
 		break;
 	}
 
-	nx_incr_dev_ref_cnt(adapter);
 	clear_bit(__NX_RESETTING, &adapter->state);
 }
 
@@ -2213,18 +2217,23 @@ netxen_detach_work(struct work_struct *work)
 
 	status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
 
-	ref_cnt = nx_decr_dev_ref_cnt(adapter);
-
 	if (status & NX_RCODE_FATAL_ERROR)
-		return;
+		goto err_ret;
 
 	if (adapter->temp == NX_TEMP_PANIC)
-		return;
+		goto err_ret;
+
+	ref_cnt = nx_decr_dev_ref_cnt(adapter);
 
 	delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
 
 	adapter->fw_wait_cnt = 0;
 	netxen_schedule_work(adapter, netxen_fwinit_work, delay);
+
+	return;
+
+err_ret:
+	clear_bit(__NX_RESETTING, &adapter->state);
 }
 
 static int
@@ -2263,7 +2272,8 @@ netxen_check_health(struct netxen_adapter *adapter)
 	dev_info(&netdev->dev, "firmware hang detected\n");
 
 detach:
-	if (!test_and_set_bit(__NX_RESETTING, &adapter->state))
+	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+			!test_and_set_bit(__NX_RESETTING, &adapter->state))
 		netxen_schedule_work(adapter, netxen_detach_work, 0);
 	return 1;
 }
@@ -2341,6 +2351,197 @@ static struct device_attribute dev_attr_bridged_mode = {
        .store = netxen_store_bridged_mode,
 };
 
+static ssize_t
+netxen_store_diag_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long new;
+
+	if (strict_strtoul(buf, 2, &new))
+		return -EINVAL;
+
+	if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+		adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
+
+	return len;
+}
+
+static ssize_t
+netxen_show_diag_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n",
+			!!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+	.show = netxen_show_diag_mode,
+	.store = netxen_store_diag_mode,
+};
+
+static int
+netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
+		loff_t offset, size_t size)
+{
+	if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+		return -EIO;
+
+	if ((size != 4) || (offset & 0x3))
+		return  -EINVAL;
+
+	if (offset < NETXEN_PCI_CRBSPACE)
+		return -EINVAL;
+
+	return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	u32 data;
+	int ret;
+
+	ret = netxen_sysfs_validate_crb(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	data = NXRD32(adapter, offset);
+	memcpy(buf, &data, size);
+	return size;
+}
+
+static ssize_t
+netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	u32 data;
+	int ret;
+
+	ret = netxen_sysfs_validate_crb(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	memcpy(&data, buf, size);
+	NXWR32(adapter, offset, data);
+	return size;
+}
+
+static int
+netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
+		loff_t offset, size_t size)
+{
+	if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+		return -EIO;
+
+	if ((size != 8) || (offset & 0x7))
+		return  -EIO;
+
+	return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	u64 data;
+	int ret;
+
+	ret = netxen_sysfs_validate_mem(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	if (adapter->pci_mem_read(adapter, offset, &data))
+		return -EIO;
+
+	memcpy(buf, &data, size);
+
+	return size;
+}
+
+ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+		struct bin_attribute *attr, char *buf,
+		loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	u64 data;
+	int ret;
+
+	ret = netxen_sysfs_validate_mem(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	memcpy(&data, buf, size);
+
+	if (adapter->pci_mem_write(adapter, offset, data))
+		return -EIO;
+
+	return size;
+}
+
+
+static struct bin_attribute bin_attr_crb = {
+	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = netxen_sysfs_read_crb,
+	.write = netxen_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = netxen_sysfs_read_mem,
+	.write = netxen_sysfs_write_mem,
+};
+
+#ifdef CONFIG_MODULES
+static ssize_t
+netxen_store_auto_fw_reset(struct module_attribute *mattr,
+		struct module *mod, const char *buf, size_t count)
+
+{
+	unsigned long new;
+
+	if (strict_strtoul(buf, 16, &new))
+		return -EINVAL;
+
+	if ((new == AUTO_FW_RESET_ENABLED) || (new == AUTO_FW_RESET_DISABLED)) {
+		auto_fw_reset = new;
+		return count;
+	}
+
+	return -EINVAL;
+}
+
+static ssize_t
+netxen_show_auto_fw_reset(struct module_attribute *mattr,
+		struct module *mod, char *buf)
+
+{
+	if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
+		return sprintf(buf, "enabled\n");
+	else
+		return sprintf(buf, "disabled\n");
+}
+
+static struct module_attribute mod_attr_fw_reset = {
+	.attr = {.name = "auto_fw_reset", .mode = (S_IRUGO | S_IWUSR)},
+	.show = netxen_show_auto_fw_reset,
+	.store = netxen_store_auto_fw_reset,
+};
+#endif
+
 static void
 netxen_create_sysfs_entries(struct netxen_adapter *adapter)
 {
@@ -2366,6 +2567,33 @@ netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
 		device_remove_file(dev, &dev_attr_bridged_mode);
 }
 
+static void
+netxen_create_diag_entries(struct netxen_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev;
+
+	dev = &pdev->dev;
+	if (device_create_file(dev, &dev_attr_diag_mode))
+		dev_info(dev, "failed to create diag_mode sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_crb))
+		dev_info(dev, "failed to create crb sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_mem))
+		dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+
+static void
+netxen_remove_diag_entries(struct netxen_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = &pdev->dev;
+
+	device_remove_file(dev, &dev_attr_diag_mode);
+	device_remove_bin_file(dev, &bin_attr_crb);
+	device_remove_bin_file(dev, &bin_attr_mem);
+}
+
 #ifdef CONFIG_INET
 
 #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
@@ -2518,6 +2746,10 @@ static struct pci_driver netxen_driver = {
 
 static int __init netxen_init_module(void)
 {
+#ifdef CONFIG_MODULES
+	struct module *mod = THIS_MODULE;
+#endif
+
 	printk(KERN_INFO "%s\n", netxen_nic_driver_string);
 
 #ifdef CONFIG_INET
@@ -2525,6 +2757,12 @@ static int __init netxen_init_module(void)
 	register_inetaddr_notifier(&netxen_inetaddr_cb);
 #endif
 
+#ifdef CONFIG_MODULES
+	if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
+		printk(KERN_ERR "%s: Failed to create auto_fw_reset "
+				"sysfs entry.", netxen_nic_driver_name);
+#endif
+
 	return pci_register_driver(&netxen_driver);
 }
 
@@ -2532,6 +2770,12 @@ module_init(netxen_init_module);
 
 static void __exit netxen_exit_module(void)
 {
+#ifdef CONFIG_MODULES
+	struct module *mod = THIS_MODULE;
+
+	sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
+#endif
+
 	pci_unregister_driver(&netxen_driver);
 
 #ifdef CONFIG_INET
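As a side note on the netxen diag interface added above: netxen_sysfs_validate_mem() only accepts 8-byte, 8-byte-aligned transfers, and only while diag mode is enabled, so a userspace reader has to issue exactly-sized, aligned pread() calls against the new "mem" binary attribute (otherwise the driver returns -EIO). Below is a minimal userspace sketch, assuming the attribute appears in the PCI device's sysfs directory; the BDF in the path is a placeholder, not taken from the patch.

/* read one 8-byte word from the netxen "mem" diag attribute */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* placeholder device path; substitute the real PCI BDF */
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/mem";
	uint64_t val;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* size must be 8 and the offset 8-byte aligned, per the validator */
	if (pread(fd, &val, sizeof(val), 0x0) != (ssize_t)sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("0x%016llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}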
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 462d20f26436..6a87d810e59d 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -377,7 +377,7 @@ static int ni5010_open(struct net_device *dev)
 
 	PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
 
-	if (request_irq(dev->irq, &ni5010_interrupt, 0, boardname, dev)) {
+	if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
 		printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index aad3b370c562..b42f5e522f90 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -284,7 +284,7 @@ static int ni52_open(struct net_device *dev)
 	startrecv586(dev);
 	ni_enaint();
 
-	ret = request_irq(dev->irq, &ni52_interrupt, 0, dev->name, dev);
+	ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
 	if (ret) {
 		ni_reset586();
 		return ret;
@@ -477,8 +477,8 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
 	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = inb(dev->base_addr+i);
 
-	if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
-		 || dev->dev_addr[2] != NI52_ADDR2) {
+	if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 ||
+	    dev->dev_addr[2] != NI52_ADDR2) {
 		retval = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 752c2e4d9cf4..ae19aafd2c7e 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -294,7 +294,7 @@ static void ni65_set_performance(struct priv *p)
 static int ni65_open(struct net_device *dev)
 {
 	struct priv *p = dev->ml_priv;
-	int irqval = request_irq(dev->irq, &ni65_interrupt,0,
+	int irqval = request_irq(dev->irq, ni65_interrupt,0,
                         cards[p->cardno].cardname,dev);
 	if (irqval) {
 		printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d6c7ac68f6ea..8ce58c4c7dd3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -45,10 +45,6 @@ MODULE_DESCRIPTION("NIU ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-#ifndef DMA_44BIT_MASK
-#define DMA_44BIT_MASK	0x00000fffffffffffULL
-#endif
-
 #ifndef readq
 static u64 readq(void __iomem *reg)
 {
@@ -7855,10 +7851,13 @@ static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 	}
 }
 
-static int niu_get_stats_count(struct net_device *dev)
+static int niu_get_sset_count(struct net_device *dev, int stringset)
 {
 	struct niu *np = netdev_priv(dev);
 
+	if (stringset != ETH_SS_STATS)
+		return -EINVAL;
+
 	return ((np->flags & NIU_FLAGS_XMAC ?
 		 NUM_XMAC_STAT_KEYS :
 		 NUM_BMAC_STAT_KEYS) +
@@ -7978,7 +7977,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
 	.get_settings		= niu_get_settings,
 	.set_settings		= niu_set_settings,
 	.get_strings		= niu_get_strings,
-	.get_stats_count	= niu_get_stats_count,
+	.get_sset_count		= niu_get_sset_count,
 	.get_ethtool_stats	= niu_get_ethtool_stats,
 	.phys_id		= niu_phys_id,
 	.get_rxnfc		= niu_get_nfc,
@@ -8144,7 +8143,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
 	int i;
 
 	for (i = 0; i < len - 5; i++) {
-		if (!strncmp(s + i, "FCode ", 5))
+		if (!strncmp(s + i, "FCode ", 6))
 			break;
 	}
 	if (i >= len - 5)
@@ -9915,7 +9914,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
 		  PCI_EXP_DEVCTL_RELAX_EN);
 	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
 
-	dma_mask = DMA_44BIT_MASK;
+	dma_mask = DMA_BIT_MASK(44);
 	err = pci_set_dma_mask(pdev, dma_mask);
 	if (!err) {
 		dev->features |= NETIF_F_HIGHDMA;
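The niu change above drops the driver-local DMA_44BIT_MASK in favour of the generic helper. A small compile-time sanity check (a sketch, assuming DMA_BIT_MASK() from <linux/dma-mapping.h> expands to ((1ULL << n) - 1) for n < 64) confirms the two constants agree:

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

static inline void niu_dma_mask_check(void)
{
	/* 2^44 - 1 is exactly the 0x00000fffffffffffULL constant removed above */
	BUILD_BUG_ON(DMA_BIT_MASK(44) != 0x00000fffffffffffULL);
}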
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 57fd483dbb1f..1f6327d41536 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -648,8 +648,8 @@ static void phy_intr(struct net_device *ndev)
 		dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
 			tbisr, tanar, tanlpar);
 
-		if ( (fullduplex = (tanlpar & TANAR_FULL_DUP)
-		      && (tanar & TANAR_FULL_DUP)) ) {
+		if ( (fullduplex = (tanlpar & TANAR_FULL_DUP) &&
+		      (tanar & TANAR_FULL_DUP)) ) {
 
 			/* both of us are full duplex */
 			writel(readl(dev->base + TXCFG)
@@ -661,12 +661,12 @@ static void phy_intr(struct net_device *ndev)
 			writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
 			       dev->base + GPIOR);
 
-		} else if(((tanlpar & TANAR_HALF_DUP)
-			   && (tanar & TANAR_HALF_DUP))
-			|| ((tanlpar & TANAR_FULL_DUP)
-			    && (tanar & TANAR_HALF_DUP))
-			|| ((tanlpar & TANAR_HALF_DUP)
-			    && (tanar & TANAR_FULL_DUP))) {
+		} else if (((tanlpar & TANAR_HALF_DUP) &&
+			    (tanar & TANAR_HALF_DUP)) ||
+			   ((tanlpar & TANAR_FULL_DUP) &&
+			    (tanar & TANAR_HALF_DUP)) ||
+			   ((tanlpar & TANAR_HALF_DUP) &&
+			    (tanar & TANAR_FULL_DUP))) {
 
 			/* one or both of us are half duplex */
 			writel((readl(dev->base + TXCFG)
@@ -720,16 +720,16 @@ static void phy_intr(struct net_device *ndev)
 
 	newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;
 
-	if (newlinkstate & LINK_UP
-	    && dev->linkstate != newlinkstate) {
+	if (newlinkstate & LINK_UP &&
+	    dev->linkstate != newlinkstate) {
 		netif_start_queue(ndev);
 		netif_wake_queue(ndev);
 		printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
 			ndev->name,
 			speeds[speed],
 			fullduplex ? "full" : "half");
-	} else if (newlinkstate & LINK_DOWN
-		   && dev->linkstate != newlinkstate) {
+	} else if (newlinkstate & LINK_DOWN &&
+		   dev->linkstate != newlinkstate) {
 		netif_stop_queue(ndev);
 		printk(KERN_INFO "%s: link now down.\n", ndev->name);
 	}
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index c254a7f5b9f5..1673eb045e1e 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1216,7 +1216,7 @@ static int pasemi_mac_open(struct net_device *dev)
 	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
 		 dev->name);
 
-	ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
+	ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
 			  mac->tx_irq_name, mac->tx);
 	if (ret) {
 		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1227,7 +1227,7 @@ static int pasemi_mac_open(struct net_device *dev)
 	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
 		 dev->name);
 
-	ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
+	ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
 			  mac->rx_irq_name, mac->rx);
 	if (ret) {
 		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 28a86224879d..fefa79e34b95 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -77,6 +77,19 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
 	return phy_ethtool_gset(phydev, cmd);
 }
 
+static int
+pasemi_mac_ethtool_set_settings(struct net_device *netdev,
+			       struct ethtool_cmd *cmd)
+{
+	struct pasemi_mac *mac = netdev_priv(netdev);
+	struct phy_device *phydev = mac->phydev;
+
+	if (!phydev)
+		return -EOPNOTSUPP;
+
+	return phy_ethtool_sset(phydev, cmd);
+}
+
 static void
 pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
 			       struct ethtool_drvinfo *drvinfo)
@@ -150,6 +163,7 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
 
 const struct ethtool_ops pasemi_mac_ethtool_ops = {
 	.get_settings		= pasemi_mac_ethtool_get_settings,
+	.set_settings		= pasemi_mac_ethtool_set_settings,
 	.get_drvinfo		= pasemi_mac_ethtool_get_drvinfo,
 	.get_msglevel		= pasemi_mac_ethtool_get_msglevel,
 	.set_msglevel		= pasemi_mac_ethtool_set_msglevel,
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 0c44b48f1384..480af402affd 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1225,8 +1225,8 @@ static void netdrv_timer (unsigned long data)
 	mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
 
 	if (!tp->duplex_lock && mii_lpa != 0xffff) {
-		int duplex = (mii_lpa & LPA_100FULL)
-		    || (mii_lpa & 0x01C0) == 0x0040;
+		int duplex = ((mii_lpa & LPA_100FULL) ||
+			      (mii_lpa & 0x01C0) == 0x0040);
 		if (tp->full_duplex != duplex) {
 			tp->full_duplex = duplex;
 			printk (KERN_INFO
@@ -1612,8 +1612,8 @@ static void netdrv_weird_interrupt (struct net_device *dev,
 	    (tp->drv_flags & HAS_LNK_CHNG)) {
 		/* Really link-change on new chips. */
 		int lpar = NETDRV_R16 (NWayLPAR);
-		int duplex = (lpar & 0x0100) || (lpar & 0x01C0) == 0x0040
-				|| tp->duplex_lock;
+		int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
+			      tp->duplex_lock);
 		if (tp->full_duplex != duplex) {
 			tp->full_duplex = duplex;
 			NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
@@ -1820,8 +1820,8 @@ static void netdrv_set_rx_mode (struct net_device *dev)
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to filter perfectly -- accept all multicasts. */
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 800597b82d18..81bafd578478 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1214,8 +1214,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 	ei_local->irqlock = 1;
    
 	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
-	while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
-		   && ++nr_serviced < MAX_SERVICE) 
+	while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 &&
+	       ++nr_serviced < MAX_SERVICE)
 	{
 		if (!netif_running(dev) || (interrupts == 0xff)) {
 			if (ei_debug > 1)
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 6e3e1ced6db4..8ad8384fc1c0 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
 
     /* Interrupt setup */
     link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-    link->irq.Handler = &fjn_interrupt;
+    link->irq.Handler = fjn_interrupt;
 
     /* General socket configuration */
     link->conf.Attributes = CONF_ENABLE_IRQ;
@@ -364,9 +364,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
 	switch (link->manf_id) {
 	case MANFID_TDK:
 	    cardtype = TDK;
-	    if (link->card_id == PRODID_TDK_GN3410
-			|| link->card_id == PRODID_TDK_NP9610
-			|| link->card_id == PRODID_TDK_MN3200) {
+	    if (link->card_id == PRODID_TDK_GN3410 ||
+		link->card_id == PRODID_TDK_NP9610 ||
+		link->card_id == PRODID_TDK_MN3200) {
 		/* MultiFunction Card */
 		link->conf.ConfigBase = 0x800;
 		link->conf.ConfigIndex = 0x47;
@@ -582,11 +582,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
     */ 
     for (i = 0; i < 0x200; i++) {
 	if (readb(base+i*2) == 0x22) {	
-	    if (readb(base+(i-1)*2) == 0xff
-	     && readb(base+(i+5)*2) == 0x04
-	     && readb(base+(i+6)*2) == 0x06
-	     && readb(base+(i+13)*2) == 0xff) 
-		break;
+		if (readb(base+(i-1)*2) == 0xff &&
+		    readb(base+(i+5)*2) == 0x04 &&
+		    readb(base+(i+6)*2) == 0x06 &&
+		    readb(base+(i+13)*2) == 0xff)
+			break;
 	}
     }
 
@@ -1186,8 +1186,8 @@ static void set_rx_mode(struct net_device *dev)
     if (dev->flags & IFF_PROMISC) {
 	memset(mc_filter, 0xff, sizeof(mc_filter));
 	outb(3, ioaddr + RX_MODE);	/* Enable promiscuous mode */
-    } else if (dev->mc_count > MC_FILTERBREAK
-	       ||  (dev->flags & IFF_ALLMULTI)) {
+    } else if (dev->mc_count > MC_FILTERBREAK ||
+	       (dev->flags & IFF_ALLMULTI)) {
 	/* Too many to filter perfectly -- accept all multicasts. */
 	memset(mc_filter, 0xff, sizeof(mc_filter));
 	outb(2, ioaddr + RX_MODE);	/* Use normal mode. */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index dae5ef6b2609..8a5ae3b182ed 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -464,7 +464,7 @@ static int nmclan_probe(struct pcmcia_device *link)
     link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
     link->io.IOAddrLines = 5;
     link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
-    link->irq.Handler = &mace_interrupt;
+    link->irq.Handler = mace_interrupt;
     link->conf.Attributes = CONF_ENABLE_IRQ;
     link->conf.IntType = INT_MEMORY_AND_IO;
     link->conf.ConfigIndex = 1;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index cbe462ed221f..2d26b6ca28b9 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -586,8 +586,8 @@ static int pcnet_config(struct pcmcia_device *link)
 	dev->if_port = 0;
     }
 
-    if ((link->conf.ConfigBase == 0x03c0)
-	&& (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+    if ((link->conf.ConfigBase == 0x03c0) &&
+	(link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
 	printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
 	printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
 	goto failed;
@@ -1751,6 +1751,13 @@ static struct pcmcia_device_id pcnet_ids[] = {
 	PCMCIA_DEVICE_NULL
 };
 MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
+MODULE_FIRMWARE("cis/PCMLM28.cis");
+MODULE_FIRMWARE("cis/DP83903.cis");
+MODULE_FIRMWARE("cis/LA-PCM.cis");
+MODULE_FIRMWARE("PE520.cis");
+MODULE_FIRMWARE("cis/NE2K.cis");
+MODULE_FIRMWARE("cis/PE-200.cis");
+MODULE_FIRMWARE("cis/tamarack.cis");
 
 static struct pcmcia_driver pcnet_driver = {
 	.drv		= {
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 9e0da370912e..cc4853bc0253 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -480,10 +480,10 @@ static int mhz_mfc_config(struct pcmcia_device *link)
 	mem.CardOffset = link->conf.ConfigBase;
     i = pcmcia_map_mem_page(link, link->win, &mem);
 
-    if ((i == 0)
-	&& (smc->manfid == MANFID_MEGAHERTZ)
-	&& (smc->cardid == PRODID_MEGAHERTZ_EM3288))
-	mhz_3288_power(link);
+    if ((i == 0) &&
+	(smc->manfid == MANFID_MEGAHERTZ) &&
+	(smc->cardid == PRODID_MEGAHERTZ_EM3288))
+	    mhz_3288_power(link);
 
     return 0;
 }
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index fe504b7f369f..a2eda28f903e 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -385,9 +385,9 @@ PrintRegisters(struct net_device *dev)
 	    printk("\n");
 	}
 	for (page=0x40 ; page <= 0x5f; page++) {
-	    if (page == 0x43 || (page >= 0x46 && page <= 0x4f)
-		|| (page >= 0x51 && page <=0x5e))
-		continue;
+		if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
+		    (page >= 0x51 && page <=0x5e))
+			continue;
 	    printk(KDBG_XIRC "Register page %2x: ", page);
 	    SelectPage(page);
 	    for (i = 8; i < 16; i++)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c1b3f09f452c..dcc67a35e8f2 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1515,8 +1515,8 @@ static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
 		if (request_region
 		    (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
 			/* check if there is really a pcnet chip on that ioaddr */
-			if ((inb(ioaddr + 14) == 0x57)
-			    && (inb(ioaddr + 15) == 0x57)) {
+			if ((inb(ioaddr + 14) == 0x57) &&
+			    (inb(ioaddr + 15) == 0x57)) {
 				pcnet32_probe1(ioaddr, 0, NULL);
 			} else {
 				release_region(ioaddr, PCNET32_TOTAL_SIZE);
@@ -1610,8 +1610,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		a = &pcnet32_wio;
 	} else {
 		pcnet32_dwio_reset(ioaddr);
-		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
-		    && pcnet32_dwio_check(ioaddr)) {
+		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
+		    pcnet32_dwio_check(ioaddr)) {
 			a = &pcnet32_dwio;
 		} else {
 			if (pcnet32_debug & NETIF_MSG_PROBE)
@@ -1750,8 +1750,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	for (i = 0; i < 6; i++)
 		promaddr[i] = inb(ioaddr + i);
 
-	if (memcmp(promaddr, dev->dev_addr, 6)
-	    || !is_valid_ether_addr(dev->dev_addr)) {
+	if (memcmp(promaddr, dev->dev_addr, 6) ||
+	    !is_valid_ether_addr(dev->dev_addr)) {
 		if (is_valid_ether_addr(promaddr)) {
 			if (pcnet32_debug & NETIF_MSG_PROBE) {
 				printk(" warning: CSR address invalid,\n");
@@ -1840,8 +1840,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	lp->mii = mii;
 	lp->chip_version = chip_version;
 	lp->msg_enable = pcnet32_debug;
-	if ((cards_found >= MAX_UNITS)
-	    || (options[cards_found] >= sizeof(options_mapping)))
+	if ((cards_found >= MAX_UNITS) ||
+	    (options[cards_found] >= sizeof(options_mapping)))
 		lp->options = PCNET32_PORT_ASEL;
 	else
 		lp->options = options_mapping[options[cards_found]];
@@ -1866,8 +1866,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		goto err_free_ring;
 	}
 	/* detect special T1/E1 WAN card by checking for MAC address */
-	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
-	    && dev->dev_addr[2] == 0x75)
+	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
+	    dev->dev_addr[2] == 0x75)
 		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
 
 	lp->init_block->mode = cpu_to_le16(0x0003);	/* Disable Rx and Tx. */
@@ -2095,7 +2095,7 @@ static int pcnet32_open(struct net_device *dev)
 	int rc;
 	unsigned long flags;
 
-	if (request_irq(dev->irq, &pcnet32_interrupt,
+	if (request_irq(dev->irq, pcnet32_interrupt,
 			lp->shared_irq ? IRQF_SHARED : 0, dev->name,
 			(void *)dev)) {
 		return -EAGAIN;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f81e53222230..f63c96a4ecb4 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/phy.h>
+#include <linux/brcmphy.h>
 
 #define PHY_ID_BCM50610		0x0143bd60
 #define PHY_ID_BCM50610M	0x0143bd70
@@ -24,6 +25,9 @@
 #define BRCM_PHY_MODEL(phydev) \
 	((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
 
+#define BRCM_PHY_REV(phydev) \
+	((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
+
 
 #define MII_BCM54XX_ECR		0x10	/* BCM54xx extended control register */
 #define MII_BCM54XX_ECR_IM	0x1000	/* Interrupt mask */
@@ -94,22 +98,35 @@
 #define BCM_LED_SRC_OFF		0xe	/* Tied high */
 #define BCM_LED_SRC_ON		0xf	/* Tied low */
 
+
 /*
  * BCM5482: Shadow registers
  * Shadow values go into bits [14:10] of register 0x1c to select a shadow
  * register to access.
  */
+/* 00101: Spare Control Register 3 */
+#define BCM54XX_SHD_SCR3		0x05
+#define  BCM54XX_SHD_SCR3_DEF_CLK125	0x0001
+#define  BCM54XX_SHD_SCR3_DLLAPD_DIS	0x0002
+#define  BCM54XX_SHD_SCR3_TRDDAPD	0x0004
+
+/* 01010: Auto Power-Down */
+#define BCM54XX_SHD_APD			0x0a
+#define  BCM54XX_SHD_APD_EN		0x0020
+
 #define BCM5482_SHD_LEDS1	0x0d	/* 01101: LED Selector 1 */
 					/* LED3 / ~LINKSPD[2] selector */
 #define BCM5482_SHD_LEDS1_LED3(src)	((src & 0xf) << 4)
 					/* LED1 / ~LINKSPD[1] selector */
 #define BCM5482_SHD_LEDS1_LED1(src)	((src & 0xf) << 0)
+#define BCM54XX_SHD_RGMII_MODE	0x0b	/* 01011: RGMII Mode Selector */
 #define BCM5482_SHD_SSD		0x14	/* 10100: Secondary SerDes control */
 #define BCM5482_SHD_SSD_LEDM	0x0008	/* SSD LED Mode enable */
 #define BCM5482_SHD_SSD_EN	0x0001	/* SSD enable */
 #define BCM5482_SHD_MODE	0x1f	/* 11111: Mode Control Register */
 #define BCM5482_SHD_MODE_1000BX	0x0001	/* Enable 1000BASE-X registers */
 
+
 /*
  * EXPANSION SHADOW ACCESS REGISTERS.  (PHY REG 0x15, 0x16, and 0x17)
  */
@@ -138,16 +155,6 @@
 #define BCM5482_SSD_SGMII_SLAVE_EN	0x0002	/* Slave mode enable */
 #define BCM5482_SSD_SGMII_SLAVE_AD	0x0001	/* Slave auto-detection */
 
-/*
- * Device flags for PHYs that can be configured for different operating
- * modes.
- */
-#define PHY_BCM_FLAGS_VALID		0x80000000
-#define PHY_BCM_FLAGS_INTF_XAUI		0x00000020
-#define PHY_BCM_FLAGS_INTF_SGMII	0x00000010
-#define PHY_BCM_FLAGS_MODE_1000BX	0x00000002
-#define PHY_BCM_FLAGS_MODE_COPPER	0x00000001
-
 
 /*****************************************************************************/
 /* Fast Ethernet Transceiver definitions. */
@@ -237,53 +244,145 @@ static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
 	return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
 }
 
+/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
 static int bcm50610_a0_workaround(struct phy_device *phydev)
 {
 	int err;
 
-	err = bcm54xx_auxctl_write(phydev,
-				   MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-				   MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
-				   MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
-	if (err < 0)
-		return err;
-
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
-				MII_BCM54XX_EXP_EXP08_RJCT_2MHZ	|
-				MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE);
-	if (err < 0)
-		goto error;
-
 	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
 				MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
 				MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
 	if (err < 0)
-		goto error;
+		return err;
 
 	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
 					MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
 	if (err < 0)
-		goto error;
+		return err;
 
 	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
 				MII_BCM54XX_EXP_EXP75_VDACCTRL);
 	if (err < 0)
-		goto error;
+		return err;
 
 	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
 				MII_BCM54XX_EXP_EXP96_MYST);
 	if (err < 0)
-		goto error;
+		return err;
 
 	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
 				MII_BCM54XX_EXP_EXP97_MYST);
 
+	return err;
+}
+
+static int bcm54xx_phydsp_config(struct phy_device *phydev)
+{
+	int err, err2;
+
+	/* Enable the SMDSP clock */
+	err = bcm54xx_auxctl_write(phydev,
+				   MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
+				   MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
+				   MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
+	if (err < 0)
+		return err;
+
+	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
+	    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
+		/* Clear bit 9 to fix a phy interop issue. */
+		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
+					MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
+		if (err < 0)
+			goto error;
+
+		if (phydev->drv->phy_id == PHY_ID_BCM50610) {
+			err = bcm50610_a0_workaround(phydev);
+			if (err < 0)
+				goto error;
+		}
+	}
+
+	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
+		int val;
+
+		val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
+		if (val < 0)
+			goto error;
+
+		val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
+		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
+	}
+
 error:
-	bcm54xx_auxctl_write(phydev,
-			     MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-			     MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
+	/* Disable the SMDSP clock */
+	err2 = bcm54xx_auxctl_write(phydev,
+				    MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
+				    MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
 
-	return err;
+	/* Return the first error reported. */
+	return err ? err : err2;
+}
+
+static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
+{
+	u32 val, orig;
+	bool clk125en = true;
+
+	/* Abort if we are using an untested phy. */
+	if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
+	    BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
+	    BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
+		return;
+
+	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
+	if (val < 0)
+		return;
+
+	orig = val;
+
+	if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
+	     BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+	    BRCM_PHY_REV(phydev) >= 0x3) {
+		/*
+		 * Here, bit 0 _disables_ CLK125 when set.
+		 * This bit is set by default.
+		 */
+		clk125en = false;
+	} else {
+		if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
+			/* Here, bit 0 _enables_ CLK125 when set */
+			val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
+			clk125en = false;
+		}
+	}
+
+	if (clk125en == false ||
+	    (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+		val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
+	else
+		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
+
+	if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
+		val |= BCM54XX_SHD_SCR3_TRDDAPD;
+
+	if (orig != val)
+		bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
+
+	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
+	if (val < 0)
+		return;
+
+	orig = val;
+
+	if (clk125en == false ||
+	    (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+		val |= BCM54XX_SHD_APD_EN;
+	else
+		val &= ~BCM54XX_SHD_APD_EN;
+
+	if (orig != val)
+		bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
 }
 
 static int bcm54xx_config_init(struct phy_device *phydev)
@@ -308,38 +407,17 @@ static int bcm54xx_config_init(struct phy_device *phydev)
 	if (err < 0)
 		return err;
 
-	if (phydev->drv->phy_id == PHY_ID_BCM50610) {
-		err = bcm50610_a0_workaround(phydev);
-		if (err < 0)
-			return err;
-	}
-
-	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
-		int err2;
-
-		err = bcm54xx_auxctl_write(phydev,
-					   MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-					   MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
-					   MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
-		if (err < 0)
-			return err;
-
-		reg = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
-		if (reg < 0)
-			goto error;
+	if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
+	     BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+	    (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
+		bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
 
-		reg |= MII_BCM54XX_EXP_EXP75_CM_OSC;
-		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, reg);
+	if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
+	    (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
+	    (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+		bcm54xx_adjust_rxrefclk(phydev);
 
-error:
-		err2 = bcm54xx_auxctl_write(phydev,
-					    MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-					    MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
-		if (err)
-			return err;
-		if (err2)
-			return err2;
-	}
+	bcm54xx_phydsp_config(phydev);
 
 	return 0;
 }
@@ -564,9 +642,11 @@ static int brcm_fet_config_init(struct phy_device *phydev)
 	if (err < 0)
 		goto done;
 
-	/* Enable auto power down */
-	err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
-				       MII_BRCM_FET_SHDW_AS2_APDE);
+	if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) {
+		/* Enable auto power down */
+		err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
+					       MII_BRCM_FET_SHDW_AS2_APDE);
+	}
 
 done:
 	/* Disable shadow register access */
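The BCM54xx additions above all go through the driver's shadow-register helpers (bcm54xx_shadow_read/write), following the scheme described in the comment block earlier in the hunk: the shadow selector sits in bits [14:10] of register 0x1c, the data in the low bits, with the top bit set to commit a write. A minimal sketch of such helpers is shown here; the MII_BCM54XX_SHD names mirror the driver's own convention, but treat the exact bit encoding as an assumption rather than a quote of the driver source.

#include <linux/phy.h>

#define MII_BCM54XX_SHD		0x1c		/* shadow access register */
#define MII_BCM54XX_SHD_WRITE	0x8000		/* commit the write */
#define MII_BCM54XX_SHD_VAL(x)	(((x) & 0x1f) << 10)	/* selector, bits [14:10] */
#define MII_BCM54XX_SHD_DATA(x)	((x) & 0x3ff)		/* payload, bits [9:0] */

/* select a shadow register, then read back its data field */
static int shd_read(struct phy_device *phydev, u16 shadow)
{
	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
}

/* write selector + data + commit bit in one MDIO access */
static int shd_write(struct phy_device *phydev, u16 shadow, u16 val)
{
	return phy_write(phydev, MII_BCM54XX_SHD,
			 MII_BCM54XX_SHD_WRITE |
			 MII_BCM54XX_SHD_VAL(shadow) |
			 MII_BCM54XX_SHD_DATA(val));
}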
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6b71b0034060..b0e9f9c51721 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -254,12 +254,12 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
 		return -EINVAL;
 
-	if (cmd->autoneg == AUTONEG_DISABLE
-			&& ((cmd->speed != SPEED_1000
-					&& cmd->speed != SPEED_100
-					&& cmd->speed != SPEED_10)
-				|| (cmd->duplex != DUPLEX_HALF
-					&& cmd->duplex != DUPLEX_FULL)))
+	if (cmd->autoneg == AUTONEG_DISABLE &&
+	    ((cmd->speed != SPEED_1000 &&
+	      cmd->speed != SPEED_100 &&
+	      cmd->speed != SPEED_10) ||
+	     (cmd->duplex != DUPLEX_HALF &&
+	      cmd->duplex != DUPLEX_FULL)))
 		return -EINVAL;
 
 	phydev->autoneg = cmd->autoneg;
@@ -353,9 +353,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
 
 		phy_write(phydev, mii_data->reg_num, val);
 		
-		if (mii_data->reg_num == MII_BMCR 
-				&& val & BMCR_RESET
-				&& phydev->drv->config_init) {
+		if (mii_data->reg_num == MII_BMCR &&
+		    val & BMCR_RESET &&
+		    phydev->drv->config_init) {
 			phy_scan_fixups(phydev);
 			phydev->drv->config_init(phydev);
 		}
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 00487f569cfd..3327e9fc7b51 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -372,8 +372,8 @@ plip_bh(struct work_struct *work)
 
 	nl->is_deferred = 0;
 	f = connection_state_table[nl->connection];
-	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
-	    && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
+	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
+	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
 		nl->is_deferred = 1;
 		schedule_delayed_work(&nl->deferred, 1);
 	}
@@ -416,9 +416,8 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 
 		if (error != ERROR) { /* Timeout */
 			nl->timeout_count++;
-			if ((error == HS_TIMEOUT
-			     && nl->timeout_count <= 10)
-			    || nl->timeout_count <= 3) {
+			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
+			    nl->timeout_count <= 3) {
 				spin_unlock_irq(&nl->lock);
 				/* Try again later */
 				return TIMEOUT;
@@ -624,8 +623,8 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
 		if (plip_receive(nibble_timeout, dev,
 				 &rcv->nibble, &rcv->length.b.msb))
 			return TIMEOUT;
-		if (rcv->length.h > dev->mtu + dev->hard_header_len
-		    || rcv->length.h < 8) {
+		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
+		    rcv->length.h < 8) {
 			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
 			return ERROR;
 		}
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399d6dd9..6a375ea4947d 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -36,7 +36,7 @@
 
 #define PPP_VERSION	"2.4.2"
 
-#define OBUFSIZE	256
+#define OBUFSIZE	4096
 
 /* Structure for storing local state. */
 struct asyncppp {
@@ -337,10 +337,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 	return 0;
 }
 
-/*
- * This can now be called from hard interrupt level as well
- * as soft interrupt level or mainline.
- */
+/* May sleep, don't call from interrupt level or with interrupts disabled */
 static void
 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 		  char *cflags, int count)
@@ -561,8 +558,8 @@ ppp_async_encode(struct asyncppp *ap)
 		 * Start of a new packet - insert the leading FLAG
 		 * character if necessary.
 		 */
-		if (islcp || flag_time == 0
-		    || time_after_eq(jiffies, ap->last_xmit + flag_time))
+		if (islcp || flag_time == 0 ||
+		    time_after_eq(jiffies, ap->last_xmit + flag_time))
 			*buf++ = PPP_FLAG;
 		ap->last_xmit = jiffies;
 		fcs = PPP_INITFCS;
@@ -699,8 +696,8 @@ ppp_async_push(struct asyncppp *ap)
 		 */
 		clear_bit(XMIT_BUSY, &ap->xmit_flags);
 		/* any more work to do? if not, exit the loop */
-		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
-		      || (!tty_stuffed && ap->tpkt)))
+		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
+		      (!tty_stuffed && ap->tpkt)))
 			break;
 		/* more work to do, see if we can do it now */
 		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
@@ -757,8 +754,8 @@ scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
 
 	for (i = 0; i < count; ++i) {
 		c = buf[i];
-		if (c == PPP_ESCAPE || c == PPP_FLAG
-		    || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
+		if (c == PPP_ESCAPE || c == PPP_FLAG ||
+		    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
 			break;
 	}
 	return i;
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 034c1c650bcb..695bc83e0cfd 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -111,11 +111,11 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
 	struct ppp_deflate_state *state;
 	int w_size;
 
-	if (opt_len != CILEN_DEFLATE
-	    || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
-	    || options[1] != CILEN_DEFLATE
-	    || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
-	    || options[3] != DEFLATE_CHK_SEQUENCE)
+	if (opt_len != CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
 		return NULL;
 	w_size = DEFLATE_SIZE(options[2]);
 	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
@@ -163,12 +163,12 @@ static int z_comp_init(void *arg, unsigned char *options, int opt_len,
 {
 	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
 
-	if (opt_len < CILEN_DEFLATE
-	    || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
-	    || options[1] != CILEN_DEFLATE
-	    || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
-	    || DEFLATE_SIZE(options[2]) != state->w_size
-	    || options[3] != DEFLATE_CHK_SEQUENCE)
+	if (opt_len < CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    DEFLATE_SIZE(options[2]) != state->w_size ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
 		return 0;
 
 	state->seqno = 0;
@@ -330,11 +330,11 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
 	struct ppp_deflate_state *state;
 	int w_size;
 
-	if (opt_len != CILEN_DEFLATE
-	    || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
-	    || options[1] != CILEN_DEFLATE
-	    || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
-	    || options[3] != DEFLATE_CHK_SEQUENCE)
+	if (opt_len != CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
 		return NULL;
 	w_size = DEFLATE_SIZE(options[2]);
 	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
@@ -381,12 +381,12 @@ static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
 {
 	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
 
-	if (opt_len < CILEN_DEFLATE
-	    || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
-	    || options[1] != CILEN_DEFLATE
-	    || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
-	    || DEFLATE_SIZE(options[2]) != state->w_size
-	    || options[3] != DEFLATE_CHK_SEQUENCE)
+	if (opt_len < CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    DEFLATE_SIZE(options[2]) != state->w_size ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
 		return 0;
 
 	state->seqno = 0;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 965adb6174c3..2282e729edbe 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -184,7 +184,7 @@ static atomic_t ppp_unit_count = ATOMIC_INIT(0);
 static atomic_t channel_count = ATOMIC_INIT(0);
 
 /* per-net private data for this module */
-static int ppp_net_id;
+static int ppp_net_id __read_mostly;
 struct ppp_net {
 	/* units to ppp mapping */
 	struct idr units_idr;
@@ -425,8 +425,8 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
 			 * network traffic (demand mode).
 			 */
 			struct ppp *ppp = PF_TO_PPP(pf);
-			if (ppp->n_channels == 0
-			    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+			if (ppp->n_channels == 0 &&
+			    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
 				break;
 		}
 		ret = -EAGAIN;
@@ -511,8 +511,8 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
 	else if (pf->kind == INTERFACE) {
 		/* see comment in ppp_read */
 		struct ppp *ppp = PF_TO_PPP(pf);
-		if (ppp->n_channels == 0
-		    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+		if (ppp->n_channels == 0 &&
+		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
 			mask |= POLLIN | POLLRDNORM;
 	}
 
@@ -864,12 +864,7 @@ static const struct file_operations ppp_device_fops = {
 
 static __net_init int ppp_init_net(struct net *net)
 {
-	struct ppp_net *pn;
-	int err;
-
-	pn = kzalloc(sizeof(*pn), GFP_KERNEL);
-	if (!pn)
-		return -ENOMEM;
+	struct ppp_net *pn = net_generic(net, ppp_net_id);
 
 	idr_init(&pn->units_idr);
 	mutex_init(&pn->all_ppp_mutex);
@@ -879,32 +874,21 @@ static __net_init int ppp_init_net(struct net *net)
 
 	spin_lock_init(&pn->all_channels_lock);
 
-	err = net_assign_generic(net, ppp_net_id, pn);
-	if (err) {
-		kfree(pn);
-		return err;
-	}
-
 	return 0;
 }
 
 static __net_exit void ppp_exit_net(struct net *net)
 {
-	struct ppp_net *pn;
+	struct ppp_net *pn = net_generic(net, ppp_net_id);
 
-	pn = net_generic(net, ppp_net_id);
 	idr_destroy(&pn->units_idr);
-	/*
-	 * if someone has cached our net then
-	 * further net_generic call will return NULL
-	 */
-	net_assign_generic(net, ppp_net_id, NULL);
-	kfree(pn);
 }
 
 static struct pernet_operations ppp_net_ops = {
 	.init = ppp_init_net,
 	.exit = ppp_exit_net,
+	.id   = &ppp_net_id,
+	.size = sizeof(struct ppp_net),
 };
 
 #define PPP_MAJOR	108
@@ -917,7 +901,7 @@ static int __init ppp_init(void)
 
 	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
 
-	err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops);
+	err = register_pernet_device(&ppp_net_ops);
 	if (err) {
 		printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
 		goto out;
@@ -943,7 +927,7 @@ static int __init ppp_init(void)
 out_chrdev:
 	unregister_chrdev(PPP_MAJOR, "ppp");
 out_net:
-	unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
+	unregister_pernet_device(&ppp_net_ops);
 out:
 	return err;
 }
@@ -1073,8 +1057,8 @@ ppp_xmit_process(struct ppp *ppp)
 	ppp_xmit_lock(ppp);
 	if (!ppp->closing) {
 		ppp_push(ppp);
-		while (!ppp->xmit_pending
-		       && (skb = skb_dequeue(&ppp->file.xq)))
+		while (!ppp->xmit_pending &&
+		       (skb = skb_dequeue(&ppp->file.xq)))
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
@@ -1153,18 +1137,18 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 		/* the filter instructions are constructed assuming
 		   a four-byte PPP header on each packet */
 		*skb_push(skb, 2) = 1;
-		if (ppp->pass_filter
-		    && sk_run_filter(skb, ppp->pass_filter,
-				     ppp->pass_len) == 0) {
+		if (ppp->pass_filter &&
+		    sk_run_filter(skb, ppp->pass_filter,
+				  ppp->pass_len) == 0) {
 			if (ppp->debug & 1)
 				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
 			kfree_skb(skb);
 			return;
 		}
 		/* if this packet passes the active filter, record the time */
-		if (!(ppp->active_filter
-		      && sk_run_filter(skb, ppp->active_filter,
-				       ppp->active_len) == 0))
+		if (!(ppp->active_filter &&
+		      sk_run_filter(skb, ppp->active_filter,
+				    ppp->active_len) == 0))
 			ppp->last_xmit = jiffies;
 		skb_pull(skb, 2);
 #else
@@ -1218,8 +1202,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	}
 
 	/* try to do packet compression */
-	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state
-	    && proto != PPP_LCP && proto != PPP_CCP) {
+	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
+	    proto != PPP_LCP && proto != PPP_CCP) {
 		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
 			if (net_ratelimit())
 				printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
@@ -1593,8 +1577,8 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
 		/* put it on the channel queue */
 		skb_queue_tail(&pch->file.rq, skb);
 		/* drop old frames if queue too long */
-		while (pch->file.rq.qlen > PPP_MAX_RQLEN
-		       && (skb = skb_dequeue(&pch->file.rq)))
+		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
+		       (skb = skb_dequeue(&pch->file.rq)))
 			kfree_skb(skb);
 		wake_up_interruptible(&pch->file.rwait);
 	} else {
@@ -1670,8 +1654,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 	 * Note that some decompressors need to see uncompressed frames
 	 * that come in as well as compressed frames.
 	 */
-	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)
-	    && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
+	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
+	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
 		skb = ppp_decompress_frame(ppp, skb);
 
 	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
@@ -1742,8 +1726,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		/* control or unknown frame - pass it to pppd */
 		skb_queue_tail(&ppp->file.rq, skb);
 		/* limit queue length by dropping old frames */
-		while (ppp->file.rq.qlen > PPP_MAX_RQLEN
-		       && (skb = skb_dequeue(&ppp->file.rq)))
+		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
+		       (skb = skb_dequeue(&ppp->file.rq)))
 			kfree_skb(skb);
 		/* wake up any process polling or blocking on read */
 		wake_up_interruptible(&ppp->file.rwait);
@@ -1761,26 +1745,26 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 				goto err;
 
 			*skb_push(skb, 2) = 0;
-			if (ppp->pass_filter
-			    && sk_run_filter(skb, ppp->pass_filter,
-					     ppp->pass_len) == 0) {
+			if (ppp->pass_filter &&
+			    sk_run_filter(skb, ppp->pass_filter,
+					  ppp->pass_len) == 0) {
 				if (ppp->debug & 1)
 					printk(KERN_DEBUG "PPP: inbound frame "
 					       "not passed\n");
 				kfree_skb(skb);
 				return;
 			}
-			if (!(ppp->active_filter
-			      && sk_run_filter(skb, ppp->active_filter,
-					       ppp->active_len) == 0))
+			if (!(ppp->active_filter &&
+			      sk_run_filter(skb, ppp->active_filter,
+					    ppp->active_len) == 0))
 				ppp->last_recv = jiffies;
 			__skb_pull(skb, 2);
 		} else
 #endif /* CONFIG_PPP_FILTER */
 			ppp->last_recv = jiffies;
 
-		if ((ppp->dev->flags & IFF_UP) == 0
-		    || ppp->npmode[npi] != NPMODE_PASS) {
+		if ((ppp->dev->flags & IFF_UP) == 0 ||
+		    ppp->npmode[npi] != NPMODE_PASS) {
 			kfree_skb(skb);
 		} else {
 			/* chop off protocol */
@@ -2244,13 +2228,13 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
 	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
 
 	err = -EFAULT;
-	if (copy_from_user(&data, (void __user *) arg, sizeof(data))
-	    || (data.length <= CCP_MAX_OPTION_LENGTH
-		&& copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
+	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
+	    (data.length <= CCP_MAX_OPTION_LENGTH &&
+	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
 		goto out;
 	err = -EINVAL;
-	if (data.length > CCP_MAX_OPTION_LENGTH
-	    || ccp_option[1] < 2 || ccp_option[1] > data.length)
+	if (data.length > CCP_MAX_OPTION_LENGTH ||
+	    ccp_option[1] < 2 || ccp_option[1] > data.length)
 		goto out;
 
 	cp = try_then_request_module(
@@ -2835,7 +2819,7 @@ static void __exit ppp_cleanup(void)
 	unregister_chrdev(PPP_MAJOR, "ppp");
 	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
 	class_destroy(ppp_class);
-	unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
+	unregister_pernet_device(&ppp_net_ops);
 }
 
 /*
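The ppp_generic conversion above (and the pppoe/pppol2tp conversions that follow) all move to the same pattern: set .id and .size in pernet_operations so the core allocates, zeroes, and frees the per-namespace private struct, leaving .init to initialize fields and turning lookups into plain net_generic() calls. A minimal sketch of that pattern follows; the "foo" names are placeholders, not kernel code.

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {
	spinlock_t lock;
	struct list_head items;
};

static int foo_net_id __read_mostly;

static __net_init int foo_init_net(struct net *net)
{
	/* storage was allocated and zeroed by the core (.size below) */
	struct foo_net *fn = net_generic(net, foo_net_id);

	spin_lock_init(&fn->lock);
	INIT_LIST_HEAD(&fn->items);
	return 0;
}

static __net_exit void foo_exit_net(struct net *net)
{
	/* nothing to free: the core releases the per-net storage itself */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

static int __init foo_init(void)
{
	return register_pernet_device(&foo_net_ops);
}

static void __exit foo_exit(void)
{
	unregister_pernet_device(&foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");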
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 88f03c9e9403..6d1a1b80cc3e 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -195,8 +195,8 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 	struct ppp_mppe_state *state;
 	unsigned int digestsize;
 
-	if (optlen != CILEN_MPPE + sizeof(state->master_key)
-	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+	if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
+	    options[0] != CI_MPPE || options[1] != CILEN_MPPE)
 		goto out;
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -276,8 +276,8 @@ mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
 	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
 	unsigned char mppe_opts;
 
-	if (optlen != CILEN_MPPE
-	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+	if (optlen != CILEN_MPPE ||
+	    options[0] != CI_MPPE || options[1] != CILEN_MPPE)
 		return 0;
 
 	MPPE_CI_TO_OPTS(&options[2], mppe_opts);
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db13586..3a13cecae3e2 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -378,10 +378,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 	return 0;
 }
 
-/*
- * This can now be called from hard interrupt level as well
- * as soft interrupt level or mainline.
- */
+/* May sleep, don't call from interrupt level or with interrupts disabled */
 static void
 ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 		  char *cflags, int count)
@@ -665,8 +662,8 @@ ppp_sync_push(struct syncppp *ap)
 		}
 		/* haven't made any progress */
 		spin_unlock_bh(&ap->xmit_lock);
-		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
-		      || (!tty_stuffed && ap->tpkt)))
+		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
+		      (!tty_stuffed && ap->tpkt)))
 			break;
 		if (!spin_trylock_bh(&ap->xmit_lock))
 			break;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 2559991eea6a..cdd11ba100ea 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -97,7 +97,7 @@ static const struct proto_ops pppoe_ops;
 static struct ppp_channel_ops pppoe_chan_ops;
 
 /* per-net private data for this module */
-static int pppoe_net_id;
+static int pppoe_net_id __read_mostly;
 struct pppoe_net {
 	/*
 	 * we could use _single_ hash table for all
@@ -250,20 +250,19 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
 {
 	struct net_device *dev;
 	struct pppoe_net *pn;
-	struct pppox_sock *pppox_sock;
+	struct pppox_sock *pppox_sock = NULL;
 
 	int ifindex;
 
-	dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
-	if (!dev)
-		return NULL;
-
-	ifindex = dev->ifindex;
-	pn = net_generic(net, pppoe_net_id);
-	pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
+	if (dev) {
+		ifindex = dev->ifindex;
+		pn = net_generic(net, pppoe_net_id);
+		pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
 				sp->sa_addr.pppoe.remote, ifindex);
-	dev_put(dev);
-
+	}
+	rcu_read_unlock();
 	return pppox_sock;
 }
 
@@ -324,8 +323,8 @@ static void pppoe_flush_dev(struct net_device *dev)
 			write_unlock_bh(&pn->hash_lock);
 			lock_sock(sk);
 
-			if (po->pppoe_dev == dev
-			    && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+			if (po->pppoe_dev == dev &&
+			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
 				pppox_unbind_sock(sk);
 				sk->sk_state = PPPOX_ZOMBIE;
 				sk->sk_state_change(sk);
@@ -1140,59 +1139,37 @@ static struct pppox_proto pppoe_proto = {
 
 static __net_init int pppoe_init_net(struct net *net)
 {
-	struct pppoe_net *pn;
+	struct pppoe_net *pn = pppoe_pernet(net);
 	struct proc_dir_entry *pde;
-	int err;
-
-	pn = kzalloc(sizeof(*pn), GFP_KERNEL);
-	if (!pn)
-		return -ENOMEM;
 
 	rwlock_init(&pn->hash_lock);
 
-	err = net_assign_generic(net, pppoe_net_id, pn);
-	if (err)
-		goto out;
-
 	pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
 #ifdef CONFIG_PROC_FS
-	if (!pde) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!pde)
+		return -ENOMEM;
 #endif
 
 	return 0;
-
-out:
-	kfree(pn);
-	return err;
 }
 
 static __net_exit void pppoe_exit_net(struct net *net)
 {
-	struct pppoe_net *pn;
-
 	proc_net_remove(net, "pppoe");
-	pn = net_generic(net, pppoe_net_id);
-	/*
-	 * if someone has cached our net then
-	 * further net_generic call will return NULL
-	 */
-	net_assign_generic(net, pppoe_net_id, NULL);
-	kfree(pn);
 }
 
 static struct pernet_operations pppoe_net_ops = {
 	.init = pppoe_init_net,
 	.exit = pppoe_exit_net,
+	.id   = &pppoe_net_id,
+	.size = sizeof(struct pppoe_net),
 };
 
 static int __init pppoe_init(void)
 {
 	int err;
 
-	err = register_pernet_gen_device(&pppoe_net_id, &pppoe_net_ops);
+	err = register_pernet_device(&pppoe_net_ops);
 	if (err)
 		goto out;
 
@@ -1213,7 +1190,7 @@ static int __init pppoe_init(void)
 out_unregister_pppoe_proto:
 	proto_unregister(&pppoe_sk_proto);
 out_unregister_net_ops:
-	unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops);
+	unregister_pernet_device(&pppoe_net_ops);
 out:
 	return err;
 }
@@ -1225,7 +1202,7 @@ static void __exit pppoe_exit(void)
 	dev_remove_pack(&pppoes_ptype);
 	unregister_pppox_proto(PX_PROTO_OE);
 	proto_unregister(&pppoe_sk_proto);
-	unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops);
+	unregister_pernet_device(&pppoe_net_ops);
 }
 
 module_init(pppoe_init);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5910df60c93e..9fbb2eba9a06 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -232,7 +232,7 @@ static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
 static const struct proto_ops pppol2tp_ops;
 
 /* per-net private data for this module */
-static int pppol2tp_net_id;
+static int pppol2tp_net_id __read_mostly;
 struct pppol2tp_net {
 	struct list_head pppol2tp_tunnel_list;
 	rwlock_t pppol2tp_tunnel_list_lock;
@@ -516,7 +516,7 @@ static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
 		return 0;
 
 	inet = inet_sk(sk);
-	psum = csum_tcpudp_nofold(inet->saddr, inet->daddr, ulen,
+	psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
 				  IPPROTO_UDP, 0);
 
 	if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
@@ -949,8 +949,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	inet = inet_sk(sk_tun);
 	udp_len = hdr_len + sizeof(ppph) + total_len;
 	uh = (struct udphdr *) skb->data;
-	uh->source = inet->sport;
-	uh->dest = inet->dport;
+	uh->source = inet->inet_sport;
+	uh->dest = inet->inet_dport;
 	uh->len = htons(udp_len);
 	uh->check = 0;
 	skb_put(skb, sizeof(struct udphdr));
@@ -978,7 +978,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		csum = skb_checksum(skb, 0, udp_len, 0);
-		uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
+		uh->check = csum_tcpudp_magic(inet->inet_saddr,
+					      inet->inet_daddr,
 					      udp_len, IPPROTO_UDP, csum);
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
@@ -986,7 +987,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum_start = skb_transport_header(skb) - skb->head;
 		skb->csum_offset = offsetof(struct udphdr, check);
-		uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr,
+		uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
+					       inet->inet_daddr,
 					       udp_len, IPPROTO_UDP, 0);
 	}
 
@@ -1136,8 +1138,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	__skb_push(skb, sizeof(*uh));
 	skb_reset_transport_header(skb);
 	uh = udp_hdr(skb);
-	uh->source = inet->sport;
-	uh->dest = inet->dport;
+	uh->source = inet->inet_sport;
+	uh->dest = inet->inet_dport;
 	uh->len = htons(udp_len);
 	uh->check = 0;
 
@@ -1181,7 +1183,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		csum = skb_checksum(skb, 0, udp_len, 0);
-		uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
+		uh->check = csum_tcpudp_magic(inet->inet_saddr,
+					      inet->inet_daddr,
 					      udp_len, IPPROTO_UDP, csum);
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
@@ -1189,7 +1192,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum_start = skb_transport_header(skb) - skb->head;
 		skb->csum_offset = offsetof(struct udphdr, check);
-		uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr,
+		uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
+					       inet->inet_daddr,
 					       udp_len, IPPROTO_UDP, 0);
 	}
 
@@ -1533,7 +1537,7 @@ static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
 	 * if the tunnel socket goes away.
 	 */
 	tunnel->old_sk_destruct = sk->sk_destruct;
-	sk->sk_destruct = &pppol2tp_tunnel_destruct;
+	sk->sk_destruct = pppol2tp_tunnel_destruct;
 
 	tunnel->sock = sk;
 	sk->sk_allocation = GFP_ATOMIC;
@@ -2601,53 +2605,31 @@ static struct pppox_proto pppol2tp_proto = {
 
 static __net_init int pppol2tp_init_net(struct net *net)
 {
-	struct pppol2tp_net *pn;
+	struct pppol2tp_net *pn = pppol2tp_pernet(net);
 	struct proc_dir_entry *pde;
-	int err;
-
-	pn = kzalloc(sizeof(*pn), GFP_KERNEL);
-	if (!pn)
-		return -ENOMEM;
 
 	INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
 	rwlock_init(&pn->pppol2tp_tunnel_list_lock);
 
-	err = net_assign_generic(net, pppol2tp_net_id, pn);
-	if (err)
-		goto out;
-
 	pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
 #ifdef CONFIG_PROC_FS
-	if (!pde) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!pde)
+		return -ENOMEM;
 #endif
 
 	return 0;
-
-out:
-	kfree(pn);
-	return err;
 }
 
 static __net_exit void pppol2tp_exit_net(struct net *net)
 {
-	struct pppoe_net *pn;
-
 	proc_net_remove(net, "pppol2tp");
-	pn = net_generic(net, pppol2tp_net_id);
-	/*
-	 * if someone has cached our net then
-	 * further net_generic call will return NULL
-	 */
-	net_assign_generic(net, pppol2tp_net_id, NULL);
-	kfree(pn);
 }
 
 static struct pernet_operations pppol2tp_net_ops = {
 	.init = pppol2tp_init_net,
 	.exit = pppol2tp_exit_net,
+	.id   = &pppol2tp_net_id,
+	.size = sizeof(struct pppol2tp_net),
 };
 
 static int __init pppol2tp_init(void)
@@ -2661,7 +2643,7 @@ static int __init pppol2tp_init(void)
 	if (err)
 		goto out_unregister_pppol2tp_proto;
 
-	err = register_pernet_gen_device(&pppol2tp_net_id, &pppol2tp_net_ops);
+	err = register_pernet_device(&pppol2tp_net_ops);
 	if (err)
 		goto out_unregister_pppox_proto;
 
@@ -2680,7 +2662,7 @@ out_unregister_pppol2tp_proto:
 static void __exit pppol2tp_exit(void)
 {
 	unregister_pppox_proto(PX_PROTO_OL2TP);
-	unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops);
+	unregister_pernet_device(&pppol2tp_net_ops);
 	proto_unregister(&pppol2tp_sk_proto);
 }
 
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 4f6d33fbc673..ac806b27c658 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -104,7 +104,8 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 EXPORT_SYMBOL(pppox_ioctl);
 
-static int pppox_create(struct net *net, struct socket *sock, int protocol)
+static int pppox_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
 {
 	int rc = -EPROTOTYPE;
 
@@ -125,7 +126,7 @@ out:
 	return rc;
 }
 
-static struct net_proto_family pppox_proto_family = {
+static const struct net_proto_family pppox_proto_family = {
 	.family	= PF_PPPOX,
 	.create	= pppox_create,
 	.owner	= THIS_MODULE,
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index b211613e9dbd..89c4948300a5 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -95,11 +95,11 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
 
 	lv1_net_control(bus_id(card), dev_id(card),
 			GELIC_LV1_GET_ETH_PORT_STATUS,
-			GELIC_LV1_VLAN_TX_ETHERNET, 0, 0,
+			GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0,
 			&card->ether_port_status, &v2);
 
 	if (inform) {
-		ether_netdev = card->netdev[GELIC_PORT_ETHERNET];
+		ether_netdev = card->netdev[GELIC_PORT_ETHERNET_0];
 		if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP)
 			netif_carrier_on(ether_netdev);
 		else
@@ -107,6 +107,24 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
 	}
 }
 
+static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
+{
+	int status;
+	u64 v1, v2;
+
+	status = lv1_net_control(bus_id(card), dev_id(card),
+				 GELIC_LV1_SET_NEGOTIATION_MODE,
+				 GELIC_LV1_PHY_ETHERNET_0, mode, 0, &v1, &v2);
+	if (status) {
+		pr_info("%s: failed setting negotiation mode %d\n", __func__,
+			status);
+		return -EBUSY;
+	}
+
+	card->link_mode = mode;
+	return 0;
+}
+
 void gelic_card_up(struct gelic_card *card)
 {
 	pr_debug("%s: called\n", __func__);
@@ -451,14 +469,14 @@ static void gelic_descr_release_tx(struct gelic_card *card,
 
 static void gelic_card_stop_queues(struct gelic_card *card)
 {
-	netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET]);
+	netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET_0]);
 
 	if (card->netdev[GELIC_PORT_WIRELESS])
 		netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]);
 }
 static void gelic_card_wake_queues(struct gelic_card *card)
 {
-	netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET]);
+	netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET_0]);
 
 	if (card->netdev[GELIC_PORT_WIRELESS])
 		netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]);
@@ -999,7 +1017,7 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
 			goto refill;
 		}
 	} else
-		netdev = card->netdev[GELIC_PORT_ETHERNET];
+		netdev = card->netdev[GELIC_PORT_ETHERNET_0];
 
 	if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) ||
 	    (status == GELIC_DESCR_DMA_PROTECTION_ERROR) ||
@@ -1244,14 +1262,58 @@ static int gelic_ether_get_settings(struct net_device *netdev,
 	cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
 			SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
 			SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
-			SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
+			SUPPORTED_1000baseT_Full;
 	cmd->advertising = cmd->supported;
-	cmd->autoneg = AUTONEG_ENABLE; /* always enabled */
+	if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->autoneg = AUTONEG_DISABLE;
+		cmd->advertising &= ~ADVERTISED_Autoneg;
+	}
 	cmd->port = PORT_TP;
 
 	return 0;
 }
 
+static int gelic_ether_set_settings(struct net_device *netdev,
+				    struct ethtool_cmd *cmd)
+{
+	struct gelic_card *card = netdev_card(netdev);
+	u64 mode;
+	int ret;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		mode = GELIC_LV1_ETHER_AUTO_NEG;
+	} else {
+		switch (cmd->speed) {
+		case SPEED_10:
+			mode = GELIC_LV1_ETHER_SPEED_10;
+			break;
+		case SPEED_100:
+			mode = GELIC_LV1_ETHER_SPEED_100;
+			break;
+		case SPEED_1000:
+			mode = GELIC_LV1_ETHER_SPEED_1000;
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (cmd->duplex == DUPLEX_FULL)
+			mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
+		else if (cmd->speed == SPEED_1000) {
+			pr_info("1000 half duplex is not supported.\n");
+			return -EINVAL;
+		}
+	}
+
+	ret = gelic_card_set_link_mode(card, mode);
+
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 u32 gelic_net_get_rx_csum(struct net_device *netdev)
 {
 	struct gelic_card *card = netdev_card(netdev);
@@ -1349,6 +1411,7 @@ done:
 static const struct ethtool_ops gelic_ether_ethtool_ops = {
 	.get_drvinfo	= gelic_net_get_drvinfo,
 	.get_settings	= gelic_ether_get_settings,
+	.set_settings	= gelic_ether_set_settings,
 	.get_link	= ethtool_op_get_link,
 	.get_tx_csum	= ethtool_op_get_tx_csum,
 	.set_tx_csum	= ethtool_op_set_tx_csum,
@@ -1369,7 +1432,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
 {
 	struct gelic_card *card =
 		container_of(work, struct gelic_card, tx_timeout_task);
-	struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET];
+	struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
 
 	dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
 
@@ -1531,10 +1594,10 @@ static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **ne
 	/* gelic_port */
 	port->netdev = *netdev;
 	port->card = card;
-	port->type = GELIC_PORT_ETHERNET;
+	port->type = GELIC_PORT_ETHERNET_0;
 
 	/* gelic_card */
-	card->netdev[GELIC_PORT_ETHERNET] = *netdev;
+	card->netdev[GELIC_PORT_ETHERNET_0] = *netdev;
 
 	INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task);
 	init_waitqueue_head(&card->waitq);
@@ -1554,9 +1617,9 @@ static void __devinit gelic_card_get_vlan_info(struct gelic_card *card)
 		int tx;
 		int rx;
 	} vlan_id_ix[2] = {
-		[GELIC_PORT_ETHERNET] = {
-			.tx = GELIC_LV1_VLAN_TX_ETHERNET,
-			.rx = GELIC_LV1_VLAN_RX_ETHERNET
+		[GELIC_PORT_ETHERNET_0] = {
+			.tx = GELIC_LV1_VLAN_TX_ETHERNET_0,
+			.rx = GELIC_LV1_VLAN_RX_ETHERNET_0
 		},
 		[GELIC_PORT_WIRELESS] = {
 			.tx = GELIC_LV1_VLAN_TX_WIRELESS,
@@ -1601,7 +1664,7 @@ static void __devinit gelic_card_get_vlan_info(struct gelic_card *card)
 			i, card->vlan[i].tx, card->vlan[i].rx);
 	}
 
-	if (card->vlan[GELIC_PORT_ETHERNET].tx) {
+	if (card->vlan[GELIC_PORT_ETHERNET_0].tx) {
 		BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx);
 		card->vlan_required = 1;
 	} else
@@ -1657,6 +1720,8 @@ static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
 	/* get internal vlan info */
 	gelic_card_get_vlan_info(card);
 
+	card->link_mode = GELIC_LV1_ETHER_AUTO_NEG;
+
 	/* setup interrupt */
 	result = lv1_net_set_interrupt_status_indicator(bus_id(card),
 							dev_id(card),
@@ -1773,6 +1838,9 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
 	struct net_device *netdev0;
 	pr_debug("%s: called\n", __func__);
 
+	/* set auto-negotiation */
+	gelic_card_set_link_mode(card, GELIC_LV1_ETHER_AUTO_NEG);
+
 #ifdef CONFIG_GELIC_WIRELESS
 	gelic_wl_driver_remove(card);
 #endif
@@ -1790,7 +1858,7 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
 	gelic_card_free_chain(card, card->tx_top);
 	gelic_card_free_chain(card, card->rx_top);
 
-	netdev0 = card->netdev[GELIC_PORT_ETHERNET];
+	netdev0 = card->netdev[GELIC_PORT_ETHERNET_0];
 	/* disconnect event port */
 	free_irq(card->irq, card);
 	netdev0->irq = NO_IRQ;
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 8b413868bbe2..32521ae5e824 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -186,7 +186,7 @@ enum gelic_lv1_net_control_code {
 	GELIC_LV1_GET_CHANNEL           = 6,
 	GELIC_LV1_POST_WLAN_CMD		= 9,
 	GELIC_LV1_GET_WLAN_CMD_RESULT	= 10,
-	GELIC_LV1_GET_WLAN_EVENT	= 11
+	GELIC_LV1_GET_WLAN_EVENT	= 11,
 };
 
 /* for GELIC_LV1_SET_WOL */
@@ -217,24 +217,29 @@ enum gelic_lv1_ether_port_status {
 	GELIC_LV1_ETHER_SPEED_10	= 0x0000000000000010L,
 	GELIC_LV1_ETHER_SPEED_100	= 0x0000000000000020L,
 	GELIC_LV1_ETHER_SPEED_1000	= 0x0000000000000040L,
-	GELIC_LV1_ETHER_SPEED_MASK	= 0x0000000000000070L
+	GELIC_LV1_ETHER_SPEED_MASK	= 0x0000000000000070L,
 };
 
 enum gelic_lv1_vlan_index {
 	/* for outgoing packets */
-	GELIC_LV1_VLAN_TX_ETHERNET	= 0x0000000000000002L,
+	GELIC_LV1_VLAN_TX_ETHERNET_0	= 0x0000000000000002L,
 	GELIC_LV1_VLAN_TX_WIRELESS	= 0x0000000000000003L,
+
 	/* for incoming packets */
-	GELIC_LV1_VLAN_RX_ETHERNET	= 0x0000000000000012L,
-	GELIC_LV1_VLAN_RX_WIRELESS	= 0x0000000000000013L
+	GELIC_LV1_VLAN_RX_ETHERNET_0	= 0x0000000000000012L,
+	GELIC_LV1_VLAN_RX_WIRELESS	= 0x0000000000000013L,
+};
+
+enum gelic_lv1_phy {
+	GELIC_LV1_PHY_ETHERNET_0	= 0x0000000000000002L,
 };
 
 /* size of hardware part of gelic descriptor */
 #define GELIC_DESCR_SIZE	(32)
 
 enum gelic_port_type {
-	GELIC_PORT_ETHERNET = 0,
-	GELIC_PORT_WIRELESS = 1,
+	GELIC_PORT_ETHERNET_0	= 0,
+	GELIC_PORT_WIRELESS	= 1,
 	GELIC_PORT_MAX
 };
 
@@ -302,6 +307,8 @@ struct gelic_card {
 	atomic_t users;
 
 	u64 ether_port_status;
+	int link_mode;
+
 	/* original address returned by kzalloc */
 	void *unalign;
 
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4c610511eb40..e3e6bc917c87 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1969,8 +1969,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 	struct ql_rcv_buf_cb *lrg_buf_cb;
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
-	if ((qdev->lrg_buf_free_count >= 8)
-	    && (qdev->lrg_buf_release_cnt >= 16)) {
+	if ((qdev->lrg_buf_free_count >= 8) &&
+	    (qdev->lrg_buf_release_cnt >= 16)) {
 
 		if (qdev->lrg_buf_skb_check)
 			if (!ql_populate_free_queue(qdev))
@@ -1978,8 +1978,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 
 		lrg_buf_q_ele = qdev->lrg_buf_next_free;
 
-		while ((qdev->lrg_buf_release_cnt >= 16)
-		       && (qdev->lrg_buf_free_count >= 8)) {
+		while ((qdev->lrg_buf_release_cnt >= 16) &&
+		       (qdev->lrg_buf_free_count >= 8)) {
 
 			for (i = 0; i < 8; i++) {
 				lrg_buf_cb =
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index c2383adcd527..862c1aaf3860 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME  	"qlge"
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"v1.00.00-b3"
+#define DRV_VERSION	"v1.00.00.23.00.00-01"
 
 #define PFX "qlge: "
 #define QPRINTK(qdev, nlevel, klevel, fmt, args...)     \
@@ -54,8 +54,10 @@
 #define RX_RING_SHADOW_SPACE	(sizeof(u64) + \
 		MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
 		MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 256
-#define LARGE_BUFFER_SIZE	PAGE_SIZE
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
@@ -795,6 +797,7 @@ enum {
 	MB_WOL_BCAST = (1 << 5),
 	MB_WOL_LINK_UP = (1 << 6),
 	MB_WOL_LINK_DOWN = (1 << 7),
+	MB_WOL_MODE_ON = (1 << 16),		/* Wake on Lan Mode on */
 	MB_CMD_SET_WOL_FLTR = 0x00000111,	/* Wake On Lan Filter */
 	MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
 	MB_CMD_SET_WOL_MAGIC = 0x00000113,	/* Wake On Lan Magic Packet */
@@ -804,12 +807,27 @@ enum {
 	MB_CMD_SET_PORT_CFG = 0x00000122,
 	MB_CMD_GET_PORT_CFG = 0x00000123,
 	MB_CMD_GET_LINK_STS = 0x00000124,
+	MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+		QL_LED_BLINK = 0x03e803e8,
+	MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
 	MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
 	MB_SET_MPI_TFK_STOP = (1 << 0),
 	MB_SET_MPI_TFK_RESUME = (1 << 1),
 	MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
 	MB_GET_MPI_TFK_STOPPED = (1 << 0),
 	MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
+	/* Sub-commands for IDC request.
+	 * This describes the reason for the
+	 * IDC request.
+	 */
+	MB_CMD_IOP_NONE = 0x0000,
+	MB_CMD_IOP_PREP_UPDATE_MPI	= 0x0001,
+	MB_CMD_IOP_COMP_UPDATE_MPI	= 0x0002,
+	MB_CMD_IOP_PREP_LINK_DOWN	= 0x0010,
+	MB_CMD_IOP_DVR_START	 = 0x0100,
+	MB_CMD_IOP_FLASH_ACC	 = 0x0101,
+	MB_CMD_IOP_RESTART_MPI	= 0x0102,
+	MB_CMD_IOP_CORE_DUMP_MPI	= 0x0103,
 
 	/* Mailbox Command Status. */
 	MB_CMD_STS_GOOD = 0x00004000,	/* Success. */
@@ -1201,9 +1219,17 @@ struct tx_ring_desc {
 	struct tx_ring_desc *next;
 };
 
+struct page_chunk {
+	struct page *page;	/* master page */
+	char *va;		/* virt addr for this chunk */
+	u64 map;		/* mapping for master */
+	unsigned int offset;	/* offset for this chunk */
+	unsigned int last_flag; /* flag set for last chunk in page */
+};
+
 struct bq_desc {
 	union {
-		struct page *lbq_page;
+		struct page_chunk pg_chunk;
 		struct sk_buff *skb;
 	} p;
 	__le64 *addr;
@@ -1237,6 +1263,9 @@ struct tx_ring {
 	atomic_t queue_stopped;	/* Turns queue off when full. */
 	struct delayed_work tx_work;
 	struct ql_adapter *qdev;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tx_errors;
 };
 
 /*
@@ -1272,6 +1301,7 @@ struct rx_ring {
 	dma_addr_t lbq_base_dma;
 	void *lbq_base_indirect;
 	dma_addr_t lbq_base_indirect_dma;
+	struct page_chunk pg_chunk; /* current page for chunks */
 	struct bq_desc *lbq;	/* array of control blocks */
 	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
 	u32 lbq_prod_idx;	/* current sw prod idx */
@@ -1302,6 +1332,11 @@ struct rx_ring {
 	struct napi_struct napi;
 	u8 reserved;
 	struct ql_adapter *qdev;
+	u64 rx_packets;
+	u64 rx_multicast;
+	u64 rx_bytes;
+	u64 rx_dropped;
+	u64 rx_errors;
 };
 
 /*
@@ -1363,6 +1398,174 @@ struct nic_stats {
 	u64 rx_1024_to_1518_pkts;
 	u64 rx_1519_to_max_pkts;
 	u64 rx_len_err_pkts;
+	/*
+	 * These stats come from offset 500h to 5C8h
+	 * in the XGMAC register.
+	 */
+	u64 tx_cbfc_pause_frames0;
+	u64 tx_cbfc_pause_frames1;
+	u64 tx_cbfc_pause_frames2;
+	u64 tx_cbfc_pause_frames3;
+	u64 tx_cbfc_pause_frames4;
+	u64 tx_cbfc_pause_frames5;
+	u64 tx_cbfc_pause_frames6;
+	u64 tx_cbfc_pause_frames7;
+	u64 rx_cbfc_pause_frames0;
+	u64 rx_cbfc_pause_frames1;
+	u64 rx_cbfc_pause_frames2;
+	u64 rx_cbfc_pause_frames3;
+	u64 rx_cbfc_pause_frames4;
+	u64 rx_cbfc_pause_frames5;
+	u64 rx_cbfc_pause_frames6;
+	u64 rx_cbfc_pause_frames7;
+	u64 rx_nic_fifo_drop;
+};
+
+/* Address/Length pairs for the coredump. */
+enum {
+	MPI_CORE_REGS_ADDR = 0x00030000,
+	MPI_CORE_REGS_CNT = 127,
+	MPI_CORE_SH_REGS_CNT = 16,
+	TEST_REGS_ADDR = 0x00001000,
+	TEST_REGS_CNT = 23,
+	RMII_REGS_ADDR = 0x00001040,
+	RMII_REGS_CNT = 64,
+	FCMAC1_REGS_ADDR = 0x00001080,
+	FCMAC2_REGS_ADDR = 0x000010c0,
+	FCMAC_REGS_CNT = 64,
+	FC1_MBX_REGS_ADDR = 0x00001100,
+	FC2_MBX_REGS_ADDR = 0x00001240,
+	FC_MBX_REGS_CNT = 64,
+	IDE_REGS_ADDR = 0x00001140,
+	IDE_REGS_CNT = 64,
+	NIC1_MBX_REGS_ADDR = 0x00001180,
+	NIC2_MBX_REGS_ADDR = 0x00001280,
+	NIC_MBX_REGS_CNT = 64,
+	SMBUS_REGS_ADDR = 0x00001200,
+	SMBUS_REGS_CNT = 64,
+	I2C_REGS_ADDR = 0x00001fc0,
+	I2C_REGS_CNT = 64,
+	MEMC_REGS_ADDR = 0x00003000,
+	MEMC_REGS_CNT = 256,
+	PBUS_REGS_ADDR = 0x00007c00,
+	PBUS_REGS_CNT = 256,
+	MDE_REGS_ADDR = 0x00010000,
+	MDE_REGS_CNT = 6,
+	CODE_RAM_ADDR = 0x00020000,
+	CODE_RAM_CNT = 0x2000,
+	MEMC_RAM_ADDR = 0x00100000,
+	MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+	u32	cookie;
+	u8	idString[16];
+	u32	timeLo;
+	u32	timeHi;
+	u32	imageSize;
+	u32	headerSize;
+	u8	info[220];
+};
+
+struct mpi_coredump_segment_header {
+	u32	cookie;
+	u32	segNum;
+	u32	segSize;
+	u32	extra;
+	u8	description[16];
+};
+
+/* Reg dump segment numbers. */
+enum {
+	CORE_SEG_NUM = 1,
+	TEST_LOGIC_SEG_NUM = 2,
+	RMII_SEG_NUM = 3,
+	FCMAC1_SEG_NUM = 4,
+	FCMAC2_SEG_NUM = 5,
+	FC1_MBOX_SEG_NUM = 6,
+	IDE_SEG_NUM = 7,
+	NIC1_MBOX_SEG_NUM = 8,
+	SMBUS_SEG_NUM = 9,
+	FC2_MBOX_SEG_NUM = 10,
+	NIC2_MBOX_SEG_NUM = 11,
+	I2C_SEG_NUM = 12,
+	MEMC_SEG_NUM = 13,
+	PBUS_SEG_NUM = 14,
+	MDE_SEG_NUM = 15,
+	NIC1_CONTROL_SEG_NUM = 16,
+	NIC2_CONTROL_SEG_NUM = 17,
+	NIC1_XGMAC_SEG_NUM = 18,
+	NIC2_XGMAC_SEG_NUM = 19,
+	WCS_RAM_SEG_NUM = 20,
+	MEMC_RAM_SEG_NUM = 21,
+	XAUI_AN_SEG_NUM = 22,
+	XAUI_HSS_PCS_SEG_NUM = 23,
+	XFI_AN_SEG_NUM = 24,
+	XFI_TRAIN_SEG_NUM = 25,
+	XFI_HSS_PCS_SEG_NUM = 26,
+	XFI_HSS_TX_SEG_NUM = 27,
+	XFI_HSS_RX_SEG_NUM = 28,
+	XFI_HSS_PLL_SEG_NUM = 29,
+	MISC_NIC_INFO_SEG_NUM = 30,
+	INTR_STATES_SEG_NUM = 31,
+	CAM_ENTRIES_SEG_NUM = 32,
+	ROUTING_WORDS_SEG_NUM = 33,
+	ETS_SEG_NUM = 34,
+	PROBE_DUMP_SEG_NUM = 35,
+	ROUTING_INDEX_SEG_NUM = 36,
+	MAC_PROTOCOL_SEG_NUM = 37,
+	XAUI2_AN_SEG_NUM = 38,
+	XAUI2_HSS_PCS_SEG_NUM = 39,
+	XFI2_AN_SEG_NUM = 40,
+	XFI2_TRAIN_SEG_NUM = 41,
+	XFI2_HSS_PCS_SEG_NUM = 42,
+	XFI2_HSS_TX_SEG_NUM = 43,
+	XFI2_HSS_RX_SEG_NUM = 44,
+	XFI2_HSS_PLL_SEG_NUM = 45,
+	SEM_REGS_SEG_NUM = 50
+
+};
+
+struct ql_nic_misc {
+	u32 rx_ring_count;
+	u32 tx_ring_count;
+	u32 intr_count;
+	u32 function;
+};
+
+struct ql_reg_dump {
+
+	/* segment 0 */
+	struct mpi_coredump_global_header mpi_global_header;
+
+	/* segment 16 */
+	struct mpi_coredump_segment_header nic_regs_seg_hdr;
+	u32 nic_regs[64];
+
+	/* segment 30 */
+	struct mpi_coredump_segment_header misc_nic_seg_hdr;
+	struct ql_nic_misc misc_nic_info;
+
+	/* segment 31 */
+	/* one interrupt state for each CQ */
+	struct mpi_coredump_segment_header intr_states_seg_hdr;
+	u32 intr_states[MAX_CPUS];
+
+	/* segment 32 */
+	/* 3 cam words each for 16 unicast entries; 32 multicast
+	 * entries fill 2 words each (3 words are reserved per entry).
+	 */
+	struct mpi_coredump_segment_header cam_entries_seg_hdr;
+	u32 cam_entries[(16 * 3) + (32 * 3)];
+
+	/* segment 33 */
+	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+	u32 nic_routing_words[16];
+
+	/* segment 34 */
+	struct mpi_coredump_segment_header ets_seg_hdr;
+	u32 ets[8+2];
 };
 
 /*
@@ -1398,6 +1601,8 @@ enum {
 	QL_ALLMULTI = 6,
 	QL_PORT_CFG = 7,
 	QL_CAM_RT_SET = 8,
+	QL_SELFTEST = 9,
+	QL_LB_LINK_UP = 10,
 };
 
 /* link_status bit definitions */
@@ -1505,6 +1710,7 @@ struct ql_adapter {
 
 	struct rx_ring rx_ring[MAX_RX_RINGS];
 	struct tx_ring tx_ring[MAX_TX_RINGS];
+	unsigned int lbq_buf_order;
 
 	int rx_csum;
 	u32 default_rx_queue;
@@ -1519,11 +1725,11 @@ struct ql_adapter {
 	u32 port_init;
 	u32 link_status;
 	u32 link_config;
+	u32 led_config;
 	u32 max_frame_size;
 
 	union flash_params flash;
 
-	struct net_device_stats stats;
 	struct workqueue_struct *workqueue;
 	struct delayed_work asic_reset_work;
 	struct delayed_work mpi_reset_work;
@@ -1533,6 +1739,7 @@ struct ql_adapter {
 	struct completion ide_completion;
 	struct nic_operations *nic_ops;
 	u16 device_id;
+	atomic_t lb_count;
 };
 
 /*
@@ -1611,10 +1818,22 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
 int ql_cam_route_initialize(struct ql_adapter *qdev);
 int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
 int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_wol(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
 void ql_link_on(struct ql_adapter *qdev);
 void ql_link_off(struct ql_adapter *qdev);
 int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+			struct ql_reg_dump *mpi_coredump);
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
+void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
 
 #if 1
 #define QL_ALL_DUMP
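The new struct page_chunk above is the heart of the qlge receive-buffer rework: one higher-order "master" page is allocated per lbq_buf_order and carved into lbq_buf_size chunks, with last_flag marking the descriptor that owns the final chunk and therefore the unmap of the master page. The chunk count follows directly from the two fields; a worked illustration with a hypothetical helper name:

#include <linux/mm.h>	/* PAGE_SIZE, get_order() */

/* Hypothetical helper: how many large-buffer chunks one master
 * allocation of the given order yields. */
static inline unsigned int example_chunks_per_block(unsigned int order,
						    unsigned int buf_size)
{
	return (PAGE_SIZE << order) / buf_size;
}

/* With 4 KiB pages:
 *   standard MTU: buf_size = LARGE_BUFFER_MIN_SIZE (2048),
 *                 order = get_order(2048) = 0  ->  2 chunks per page;
 *   jumbo MTU:    buf_size = LARGE_BUFFER_MAX_SIZE (8192),
 *                 order = get_order(8192) = 1  ->  1 chunk per block.
 */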
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index aa88cb3f41c7..9f58c4710761 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,185 @@
 #include "qlge.h"
 
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+	int status = 0;
+	int i;
+
+	for (i = 0; i < 8; i++, buf++) {
+		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+		*buf = ql_read32(qdev, NIC_ETS);
+	}
+
+	for (i = 0; i < 2; i++, buf++) {
+		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+		*buf = ql_read32(qdev, CNA_ETS);
+	}
+
+	return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+	int i;
+
+	for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+		ql_write32(qdev, INTR_EN,
+				qdev->intr_context[i].intr_read_mask);
+		*buf = ql_read32(qdev, INTR_EN);
+	}
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+	int i, status;
+	u32 value[3];
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+
+	for (i = 0; i < 16; i++) {
+		status = ql_get_mac_addr_reg(qdev,
+					MAC_ADDR_TYPE_CAM_MAC, i, value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of mac index register.\n");
+			goto err;
+		}
+		*buf++ = value[0];	/* lower MAC address */
+		*buf++ = value[1];	/* upper MAC address */
+		*buf++ = value[2];	/* output */
+	}
+	for (i = 0; i < 32; i++) {
+		status = ql_get_mac_addr_reg(qdev,
+					MAC_ADDR_TYPE_MULTI_MAC, i, value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of mac index register.\n");
+			goto err;
+		}
+		*buf++ = value[0];	/* lower Mcast address */
+		*buf++ = value[1];	/* upper Mcast address */
+	}
+err:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+	int status;
+	u32 value, i;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	for (i = 0; i < 16; i++) {
+		status = ql_get_routing_reg(qdev, i, &value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of routing index register.\n");
+			goto err;
+		} else {
+			*buf++ = value;
+		}
+	}
+err:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+/* Create a coredump segment header */
+static void ql_build_coredump_seg_header(
+		struct mpi_coredump_segment_header *seg_hdr,
+		u32 seg_number, u32 seg_size, u8 *desc)
+{
+	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+	seg_hdr->segNum = seg_number;
+	seg_hdr->segSize = seg_size;
+	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+			struct ql_reg_dump *mpi_coredump)
+{
+	int i, status;
+
+
+	memset(&(mpi_coredump->mpi_global_header), 0,
+		sizeof(struct mpi_coredump_global_header));
+	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+	mpi_coredump->mpi_global_header.headerSize =
+		sizeof(struct mpi_coredump_global_header);
+	mpi_coredump->mpi_global_header.imageSize =
+		sizeof(struct ql_reg_dump);
+	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+		sizeof(mpi_coredump->mpi_global_header.idString));
+
+
+	/* segment 30 */
+	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+				MISC_NIC_INFO_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->misc_nic_info),
+				"MISC NIC INFO");
+	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+	mpi_coredump->misc_nic_info.function = qdev->func;
+
+	/* Segment 16, Rev C. Step 18 */
+	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+				NIC1_CONTROL_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_regs),
+				"NIC Registers");
+	/* Get generic reg dump */
+	for (i = 0; i < 64; i++)
+		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+	/* Segment 31 */
+	/* Get indexed register values. */
+	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+				INTR_STATES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->intr_states),
+				"INTR States");
+	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+				CAM_ENTRIES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->cam_entries),
+				"CAM Entries");
+	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+	if (status)
+		return;
+
+	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+				ROUTING_WORDS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_routing_words),
+				"Routing Words");
+	status = ql_get_routing_entries(qdev,
+			 &mpi_coredump->nic_routing_words[0]);
+	if (status)
+		return;
+
+	/* Segment 34 (Rev C. step 23) */
+	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+				ETS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->ets),
+				"ETS Registers");
+	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+	if (status)
+		return;
+}
+
 #ifdef QL_REG_DUMP
 static void ql_dump_intr_states(struct ql_adapter *qdev)
 {
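The register dump built by ql_gen_reg_dump() above is a flat struct ql_reg_dump: a global header followed by fixed segments, each introduced by a mpi_coredump_segment_header stamped with MPI_COREDUMP_COOKIE and a segment number. A consumer walking such a dump (for example one retrieved through the ethtool get_regs hook added further down) can sanity-check a segment with something like the sketch below; this is illustrative code assuming qlge.h and <linux/errno.h>, not part of the driver:

/* Sketch: validate one segment header from a qlge register dump. */
static int example_check_segment(const struct mpi_coredump_segment_header *h,
				 u32 expected_seg)
{
	if (h->cookie != MPI_COREDUMP_COOKIE)
		return -EINVAL;		/* not a qlge dump segment */
	if (h->segNum != expected_seg)
		return -EINVAL;		/* unexpected segment order */
	if (h->segSize < sizeof(*h))
		return -EINVAL;		/* segSize includes the header itself */
	return 0;
}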
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 52073946bce3..058fa0a48c6f 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -36,6 +36,11 @@
 
 #include "qlge.h"
 
+static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Loopback test  (offline)"
+};
+#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+
 static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 {
 	int i, status = 0;
@@ -132,6 +137,41 @@ static void ql_update_stats(struct ql_adapter *qdev)
 		iter++;
 	}
 
+	/*
+	 * Get Per-priority TX pause frame counter statistics.
+	 */
+	for (i = 0x500; i < 0x540; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get Per-priority RX pause frame counter statistics.
+	 */
+	for (i = 0x568; i < 0x5a8; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get RX NIC FIFO DROP statistics.
+	 */
+	if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+		QPRINTK(qdev, DRV, ERR,
+			"Error reading status register 0x%.04x.\n", 0x5b8);
+		goto end;
+	} else
+		*iter = data;
 end:
 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
 quit:
@@ -185,6 +225,23 @@ static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
 	{"rx_1024_to_1518_pkts"},
 	{"rx_1519_to_max_pkts"},
 	{"rx_len_err_pkts"},
+	{"tx_cbfc_pause_frames0"},
+	{"tx_cbfc_pause_frames1"},
+	{"tx_cbfc_pause_frames2"},
+	{"tx_cbfc_pause_frames3"},
+	{"tx_cbfc_pause_frames4"},
+	{"tx_cbfc_pause_frames5"},
+	{"tx_cbfc_pause_frames6"},
+	{"tx_cbfc_pause_frames7"},
+	{"rx_cbfc_pause_frames0"},
+	{"rx_cbfc_pause_frames1"},
+	{"rx_cbfc_pause_frames2"},
+	{"rx_cbfc_pause_frames3"},
+	{"rx_cbfc_pause_frames4"},
+	{"rx_cbfc_pause_frames5"},
+	{"rx_cbfc_pause_frames6"},
+	{"rx_cbfc_pause_frames7"},
+	{"rx_nic_fifo_drop"},
 };
 
 static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -199,6 +256,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 static int ql_get_sset_count(struct net_device *dev, int sset)
 {
 	switch (sset) {
+	case ETH_SS_TEST:
+		return QLGE_TEST_LEN;
 	case ETH_SS_STATS:
 		return ARRAY_SIZE(ql_stats_str_arr);
 	default:
@@ -257,6 +316,23 @@ ql_get_ethtool_stats(struct net_device *ndev,
 	*data++ = s->rx_1024_to_1518_pkts;
 	*data++ = s->rx_1519_to_max_pkts;
 	*data++ = s->rx_len_err_pkts;
+	*data++ = s->tx_cbfc_pause_frames0;
+	*data++ = s->tx_cbfc_pause_frames1;
+	*data++ = s->tx_cbfc_pause_frames2;
+	*data++ = s->tx_cbfc_pause_frames3;
+	*data++ = s->tx_cbfc_pause_frames4;
+	*data++ = s->tx_cbfc_pause_frames5;
+	*data++ = s->tx_cbfc_pause_frames6;
+	*data++ = s->tx_cbfc_pause_frames7;
+	*data++ = s->rx_cbfc_pause_frames0;
+	*data++ = s->rx_cbfc_pause_frames1;
+	*data++ = s->rx_cbfc_pause_frames2;
+	*data++ = s->rx_cbfc_pause_frames3;
+	*data++ = s->rx_cbfc_pause_frames4;
+	*data++ = s->rx_cbfc_pause_frames5;
+	*data++ = s->rx_cbfc_pause_frames6;
+	*data++ = s->rx_cbfc_pause_frames7;
+	*data++ = s->rx_nic_fifo_drop;
 }
 
 static int ql_get_settings(struct net_device *ndev,
@@ -302,6 +378,181 @@ static void ql_get_drvinfo(struct net_device *ndev,
 	drvinfo->eedump_len = 0;
 }
 
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	/* What we support. */
+	wol->supported = WAKE_MAGIC;
+	/* What we've currently got set. */
+	wol->wolopts = qdev->wol;
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+	qdev->wol = wol->wolopts;
+
+	QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
+			 qdev->wol, ndev->name);
+	if (!qdev->wol) {
+		u32 wol = 0;
+		status = ql_mb_wol_mode(qdev, wol);
+		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "cleared successfully" : "clear failed",
+			wol, qdev->ndev->name);
+	}
+
+	return 0;
+}
+
+static int ql_phys_id(struct net_device *ndev, u32 data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 led_reg, i;
+	int status;
+
+	/* Save the current LED settings */
+	status = ql_mb_get_led_cfg(qdev);
+	if (status)
+		return status;
+	led_reg = qdev->led_config;
+
+	/* Start blinking the led */
+	if (!data || data > 300)
+		data = 300;
+
+	for (i = 0; i < (data * 10); i++)
+		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+
+	/* Restore LED settings */
+	status = ql_mb_set_led_cfg(qdev, led_reg);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static int ql_start_loopback(struct ql_adapter *qdev)
+{
+	if (netif_carrier_ok(qdev->ndev)) {
+		set_bit(QL_LB_LINK_UP, &qdev->flags);
+		netif_carrier_off(qdev->ndev);
+	} else
+		clear_bit(QL_LB_LINK_UP, &qdev->flags);
+	qdev->link_config |= CFG_LOOPBACK_PCS;
+	return ql_mb_set_port_cfg(qdev);
+}
+
+static void ql_stop_loopback(struct ql_adapter *qdev)
+{
+	qdev->link_config &= ~CFG_LOOPBACK_PCS;
+	ql_mb_set_port_cfg(qdev);
+	if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
+		netif_carrier_on(qdev->ndev);
+		clear_bit(QL_LB_LINK_UP, &qdev->flags);
+	}
+}
+
+static void ql_create_lb_frame(struct sk_buff *skb,
+					unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+void ql_check_lb_frame(struct ql_adapter *qdev,
+					struct sk_buff *skb)
+{
+	unsigned int frame_size = skb->len;
+
+	if ((*(skb->data + 3) == 0xFF) &&
+		(*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			atomic_dec(&qdev->lb_count);
+			return;
+	}
+}
+
+static int ql_run_loopback_test(struct ql_adapter *qdev)
+{
+	int i;
+	netdev_tx_t rc;
+	struct sk_buff *skb;
+	unsigned int size = SMALL_BUF_MAP_SIZE;
+
+	for (i = 0; i < 64; i++) {
+		skb = netdev_alloc_skb(qdev->ndev, size);
+		if (!skb)
+			return -ENOMEM;
+
+		skb->queue_mapping = 0;
+		skb_put(skb, size);
+		ql_create_lb_frame(skb, size);
+		rc = ql_lb_send(skb, qdev->ndev);
+		if (rc != NETDEV_TX_OK)
+			return -EPIPE;
+		atomic_inc(&qdev->lb_count);
+	}
+
+	ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
+	return atomic_read(&qdev->lb_count) ? -EIO : 0;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+	*data = ql_start_loopback(qdev);
+	if (*data)
+		goto out;
+	*data = ql_run_loopback_test(qdev);
+out:
+	ql_stop_loopback(qdev);
+	return *data;
+}
+
+static void ql_self_test(struct net_device *ndev,
+				struct ethtool_test *eth_test, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		set_bit(QL_SELFTEST, &qdev->flags);
+		if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+			/* Offline tests */
+			if (ql_loopback_test(qdev, &data[0]))
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		} else {
+			/* Online tests */
+			data[0] = 0;
+		}
+		clear_bit(QL_SELFTEST, &qdev->flags);
+	} else {
+		QPRINTK(qdev, DRV, ERR,
+			"%s is down, loopback test will fail.\n", ndev->name);
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+	return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+			struct ethtool_regs *regs, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ql_gen_reg_dump(qdev, p);
+}
+
 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct ql_adapter *qdev = netdev_priv(dev);
@@ -355,6 +606,37 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
 	return ql_update_ring_coalescing(qdev);
 }
 
+static void ql_get_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *pause)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+
+	ql_mb_get_port_cfg(qdev);
+	if (qdev->link_config & CFG_PAUSE_STD) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int ql_set_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *pause)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	int status = 0;
+
+	if ((pause->rx_pause) && (pause->tx_pause))
+		qdev->link_config |= CFG_PAUSE_STD;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		qdev->link_config &= ~CFG_PAUSE_STD;
+	else
+		return -EINVAL;
+
+	status = ql_mb_set_port_cfg(qdev);
+	if (status)
+		return status;
+	return status;
+}
+
 static u32 ql_get_rx_csum(struct net_device *netdev)
 {
 	struct ql_adapter *qdev = netdev_priv(netdev);
@@ -396,9 +678,17 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
 const struct ethtool_ops qlge_ethtool_ops = {
 	.get_settings = ql_get_settings,
 	.get_drvinfo = ql_get_drvinfo,
+	.get_wol = ql_get_wol,
+	.set_wol = ql_set_wol,
+	.get_regs_len	= ql_get_regs_len,
+	.get_regs = ql_get_regs,
 	.get_msglevel = ql_get_msglevel,
 	.set_msglevel = ql_set_msglevel,
 	.get_link = ethtool_op_get_link,
+	.phys_id		 = ql_phys_id,
+	.self_test		 = ql_self_test,
+	.get_pauseparam		 = ql_get_pauseparam,
+	.set_pauseparam		 = ql_set_pauseparam,
 	.get_rx_csum = ql_get_rx_csum,
 	.set_rx_csum = ql_set_rx_csum,
 	.get_tx_csum = ethtool_op_get_tx_csum,
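The offline self-test added above works by sending 64 frames through the MAC in PCS loopback and counting them back: ql_create_lb_frame() fills each buffer with 0xFF, overwrites the back half with 0xAA, and drops 0xBE/0xAF markers just past the midpoint, while ql_check_lb_frame() decrements lb_count whenever a received frame still carries that signature. A standalone restatement of the check, for illustration only (the function name is hypothetical and buf/len stand in for the skb data):

#include <linux/types.h>

/* Hypothetical re-statement of ql_check_lb_frame()'s pattern test. */
static bool example_is_lb_frame(const unsigned char *buf, unsigned int len)
{
	return buf[3] == 0xFF &&
	       buf[len / 2 + 10] == 0xBE &&
	       buf[len / 2 + 12] == 0xAF;
}

/* If all 64 frames come back and match, lb_count drops to zero and
 * ql_loopback_test() reports success; anything left over is -EIO. */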
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index a2fc70a0d0cc..707b391afa02 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 #define MSIX_IRQ 0
 #define MSI_IRQ 1
 #define LEG_IRQ 2
-static int irq_type = MSIX_IRQ;
-module_param(irq_type, int, MSIX_IRQ);
-MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+static int qlge_irq_type = MSIX_IRQ;
+module_param(qlge_irq_type, int, MSIX_IRQ);
+MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 
 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -1025,6 +1025,11 @@ end:
 	return status;
 }
 
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+	return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
 /* Get the next large buffer. */
 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 	return lbq_desc;
 }
 
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+		struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+					pci_unmap_addr(lbq_desc, mapaddr),
+				    rx_ring->lbq_buf_size,
+					PCI_DMA_FROMDEVICE);
+
+	/* If it's the last chunk of our master page then
+	 * we unmap it.
+	 */
+	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+					== ql_lbq_block_size(qdev))
+		pci_unmap_page(qdev->pdev,
+				lbq_desc->p.pg_chunk.map,
+				ql_lbq_block_size(qdev),
+				PCI_DMA_FROMDEVICE);
+	return lbq_desc;
+}
+
 /* Get the next small buffer. */
 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
 }
 
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+						struct bq_desc *lbq_desc)
+{
+	if (!rx_ring->pg_chunk.page) {
+		u64 map;
+		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+						GFP_ATOMIC,
+						qdev->lbq_buf_order);
+		if (unlikely(!rx_ring->pg_chunk.page)) {
+			QPRINTK(qdev, DRV, ERR,
+				"page allocation failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.offset = 0;
+		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+					0, ql_lbq_block_size(qdev),
+					PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(qdev->pdev, map)) {
+			__free_pages(rx_ring->pg_chunk.page,
+					qdev->lbq_buf_order);
+			QPRINTK(qdev, DRV, ERR,
+				"PCI mapping failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.map = map;
+		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+	}
+
+	/* Copy the current master pg_chunk info
+	 * to the current descriptor.
+	 */
+	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+	/* Adjust the master page chunk for next
+	 * buffer get.
+	 */
+	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+		rx_ring->pg_chunk.page = NULL;
+		lbq_desc->p.pg_chunk.last_flag = 1;
+	} else {
+		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+		get_page(rx_ring->pg_chunk.page);
+		lbq_desc->p.pg_chunk.last_flag = 0;
+	}
+	return 0;
+}
 /* Process (refill) a large buffer queue. */
 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	u64 map;
 	int i;
 
-	while (rx_ring->lbq_free_cnt > 16) {
+	while (rx_ring->lbq_free_cnt > 32) {
 		for (i = 0; i < 16; i++) {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"lbq: try cleaning clean_idx = %d.\n",
 				clean_idx);
 			lbq_desc = &rx_ring->lbq[clean_idx];
-			if (lbq_desc->p.lbq_page == NULL) {
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"lbq: getting new page for index %d.\n",
-					lbq_desc->index);
-				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
-				if (lbq_desc->p.lbq_page == NULL) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"Couldn't get a page.\n");
-					return;
-				}
-				map = pci_map_page(qdev->pdev,
-						   lbq_desc->p.lbq_page,
-						   0, PAGE_SIZE,
-						   PCI_DMA_FROMDEVICE);
-				if (pci_dma_mapping_error(qdev->pdev, map)) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					put_page(lbq_desc->p.lbq_page);
-					lbq_desc->p.lbq_page = NULL;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"PCI mapping failed.\n");
+			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Could not get a page chunk.\n");
 					return;
 				}
+
+			map = lbq_desc->p.pg_chunk.map +
+				lbq_desc->p.pg_chunk.offset;
 				pci_unmap_addr_set(lbq_desc, mapaddr, map);
-				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+			pci_unmap_len_set(lbq_desc, maplen,
+					rx_ring->lbq_buf_size);
 				*lbq_desc->addr = cpu_to_le64(map);
-			}
+
+			pci_dma_sync_single_for_device(qdev->pdev, map,
+						rx_ring->lbq_buf_size,
+						PCI_DMA_FROMDEVICE);
 			clean_idx++;
 			if (clean_idx == rx_ring->lbq_len)
 				clean_idx = 0;
@@ -1147,7 +1210,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					sbq_desc->index);
 				sbq_desc->p.skb =
 				    netdev_alloc_skb(qdev->ndev,
-						     rx_ring->sbq_buf_size);
+						     SMALL_BUFFER_SIZE);
 				if (sbq_desc->p.skb == NULL) {
 					QPRINTK(qdev, PROBE, ERR,
 						"Couldn't get an skb.\n");
@@ -1157,8 +1220,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
 				map = pci_map_single(qdev->pdev,
 						     sbq_desc->p.skb->data,
-						     rx_ring->sbq_buf_size /
-						     2, PCI_DMA_FROMDEVICE);
+						     rx_ring->sbq_buf_size,
+						     PCI_DMA_FROMDEVICE);
 				if (pci_dma_mapping_error(qdev->pdev, map)) {
 					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 					rx_ring->sbq_clean_idx = clean_idx;
@@ -1168,7 +1231,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				}
 				pci_unmap_addr_set(sbq_desc, mapaddr, map);
 				pci_unmap_len_set(sbq_desc, maplen,
-						  rx_ring->sbq_buf_size / 2);
+						  rx_ring->sbq_buf_size);
 				*sbq_desc->addr = cpu_to_le64(map);
 			}
 
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			 * chain it to the header buffer's skb and let
 			 * it rip.
 			 */
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc,
-						      mapaddr),
-				       pci_unmap_len(lbq_desc, maplen),
-				       PCI_DMA_FROMDEVICE);
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"Chaining page to skb.\n");
-			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-					   0, length);
+				"Chaining page at offset = %d, "
+				"for %d bytes to skb.\n",
+				lbq_desc->p.pg_chunk.offset, length);
+			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						length);
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
-			lbq_desc->p.lbq_page = NULL;
 		} else {
 			/*
 			 * The headers and data are in a single large buffer. We
 			 * copy it to a new skb and let it go. This can happen with
 			 * jumbo mtu on a non-TCP/UDP frame.
 			 */
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 			skb = netdev_alloc_skb(qdev->ndev, length);
 			if (skb == NULL) {
 				QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			skb_reserve(skb, NET_IP_ALIGN);
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
-			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-					   0, length);
+			skb_fill_page_desc(skb, 0,
+						lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						length);
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
 			length -= length;
-			lbq_desc->p.lbq_page = NULL;
 			__pskb_pull_tail(skb,
 				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
 				VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 *         frames.  If the MTU goes up we could
 		 *          eventually be in trouble.
 		 */
-		int size, offset, i = 0;
-		__le64 *bq, bq_array[8];
+		int size, i = 0;
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"%d bytes of headers & data in chain of large.\n", length);
 			skb = sbq_desc->p.skb;
-			bq = &bq_array[0];
-			memcpy(bq, skb->data, sizeof(bq_array));
 			sbq_desc->p.skb = NULL;
 			skb_reserve(skb, NET_IP_ALIGN);
-		} else {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"Headers in small, %d bytes of data in chain of large.\n", length);
-			bq = (__le64 *)sbq_desc->p.skb->data;
 		}
 		while (length > 0) {
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc,
-						      mapaddr),
-				       pci_unmap_len(lbq_desc,
-						     maplen),
-				       PCI_DMA_FROMDEVICE);
-			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
-			offset = 0;
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+			size = (length < rx_ring->lbq_buf_size) ? length :
+				rx_ring->lbq_buf_size;
 
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"Adding page %d to skb for %d bytes.\n",
 				i, size);
-			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
-					   offset, size);
+			skb_fill_page_desc(skb, i,
+						lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						size);
 			skb->len += size;
 			skb->data_len += size;
 			skb->truesize += size;
 			length -= size;
-			lbq_desc->p.lbq_page = NULL;
-			bq++;
 			i++;
 		}
 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -1613,6 +1661,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (unlikely(!skb)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"No skb available, drop packet.\n");
+		rx_ring->rx_dropped++;
 		return;
 	}
 
@@ -1621,6 +1670,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
 					ib_mac_rsp->flags2);
 		dev_kfree_skb_any(skb);
+		rx_ring->rx_errors++;
 		return;
 	}
 
@@ -1629,6 +1679,14 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	 */
 	if (skb->len > ndev->mtu + ETH_HLEN) {
 		dev_kfree_skb_any(skb);
+		rx_ring->rx_dropped++;
+		return;
+	}
+
+	/* loopback self test for ethtool */
+	if (test_bit(QL_SELFTEST, &qdev->flags)) {
+		ql_check_lb_frame(qdev, skb);
+		dev_kfree_skb_any(skb);
 		return;
 	}
 
@@ -1642,6 +1700,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
 			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+		rx_ring->rx_multicast++;
 	}
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
 		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
@@ -1673,8 +1732,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 		}
 	}
 
-	qdev->stats.rx_packets++;
-	qdev->stats.rx_bytes += skb->len;
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
 	skb_record_rx_queue(skb, rx_ring->cq_id);
 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		if (qdev->vlgrp &&
@@ -1705,8 +1764,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
-	qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
-	qdev->stats.tx_packets++;
+	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
+	tx_ring->tx_packets++;
 	dev_kfree_skb(tx_ring_desc->skb);
 	tx_ring_desc->skb = NULL;
 
@@ -1929,7 +1988,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
@@ -1945,7 +2004,7 @@ static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
 	}
 }
 
-static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	u32 enable_bit = MAC_ADDR_E;
@@ -1961,7 +2020,7 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
-static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	u32 enable_bit = 0;
@@ -2046,12 +2105,12 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	 */
 	var = ql_read32(qdev, ISR1);
 	if (var & intr_context->irq_mask) {
-				QPRINTK(qdev, INTR, INFO,
+		QPRINTK(qdev, INTR, INFO,
 			"Waking handler for rx_ring[0].\n");
 		ql_disable_completion_interrupt(qdev, intr_context->intr);
-					napi_schedule(&rx_ring->napi);
-				work_done++;
-			}
+		napi_schedule(&rx_ring->napi);
+		work_done++;
+	}
 	ql_enable_completion_interrupt(qdev, intr_context->intr);
 	return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -2149,6 +2208,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 			__func__, tx_ring_idx);
 		netif_stop_subqueue(ndev, tx_ring->wq_id);
 		atomic_inc(&tx_ring->queue_stopped);
+		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
@@ -2183,6 +2243,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 			NETDEV_TX_OK) {
 		QPRINTK(qdev, TX_QUEUED, ERR,
 				"Could not map the segments.\n");
+		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
@@ -2199,6 +2260,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+
 static void ql_free_shadow_space(struct ql_adapter *qdev)
 {
 	if (qdev->rx_ring_shadow_reg_area) {
@@ -2285,8 +2347,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
 				 &tx_ring->wq_base_dma);
 
-	if ((tx_ring->wq_base == NULL)
-		|| tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
+	if ((tx_ring->wq_base == NULL) ||
+	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
 		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
 		return -ENOMEM;
 	}
@@ -2304,20 +2366,29 @@ err:
 
 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
-	int i;
 	struct bq_desc *lbq_desc;
 
-	for (i = 0; i < rx_ring->lbq_len; i++) {
-		lbq_desc = &rx_ring->lbq[i];
-		if (lbq_desc->p.lbq_page) {
+	uint32_t  curr_idx, clean_idx;
+
+	curr_idx = rx_ring->lbq_curr_idx;
+	clean_idx = rx_ring->lbq_clean_idx;
+	while (curr_idx != clean_idx) {
+		lbq_desc = &rx_ring->lbq[curr_idx];
+
+		if (lbq_desc->p.pg_chunk.last_flag) {
 			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc, mapaddr),
-				       pci_unmap_len(lbq_desc, maplen),
+				lbq_desc->p.pg_chunk.map,
+				ql_lbq_block_size(qdev),
 				       PCI_DMA_FROMDEVICE);
-
-			put_page(lbq_desc->p.lbq_page);
-			lbq_desc->p.lbq_page = NULL;
+			lbq_desc->p.pg_chunk.last_flag = 0;
 		}
+
+		put_page(lbq_desc->p.pg_chunk.page);
+		lbq_desc->p.pg_chunk.page = NULL;
+
+		if (++curr_idx == rx_ring->lbq_len)
+			curr_idx = 0;
+
 	}
 }
 
@@ -2615,6 +2686,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	/* Set up the shadow registers for this ring. */
 	rx_ring->prod_idx_sh_reg = shadow_reg;
 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	*rx_ring->prod_idx_sh_reg = 0;
 	shadow_reg += sizeof(u64);
 	shadow_reg_dma += sizeof(u64);
 	rx_ring->lbq_base_indirect = shadow_reg;
@@ -2692,7 +2764,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		cqicb->sbq_addr =
 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
 		cqicb->sbq_buf_size =
-		    cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
+		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
 			(u16) rx_ring->sbq_len;
 		cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -2798,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
 	int i, err;
 
 	/* Get the MSIX vectors. */
-	if (irq_type == MSIX_IRQ) {
+	if (qlge_irq_type == MSIX_IRQ) {
 		/* Try to alloc space for the msix struct,
 		 * if it fails then go to MSI/legacy.
 		 */
@@ -2806,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
 					    sizeof(struct msix_entry),
 					    GFP_KERNEL);
 		if (!qdev->msi_x_entry) {
-			irq_type = MSI_IRQ;
+			qlge_irq_type = MSI_IRQ;
 			goto msi;
 		}
 
@@ -2829,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
 			QPRINTK(qdev, IFUP, WARNING,
 				"MSI-X Enable failed, trying MSI.\n");
 			qdev->intr_count = 1;
-			irq_type = MSI_IRQ;
+			qlge_irq_type = MSI_IRQ;
 		} else if (err == 0) {
 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
 			QPRINTK(qdev, IFUP, INFO,
@@ -2840,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
 	}
 msi:
 	qdev->intr_count = 1;
-	if (irq_type == MSI_IRQ) {
+	if (qlge_irq_type == MSI_IRQ) {
 		if (!pci_enable_msi(qdev->pdev)) {
 			set_bit(QL_MSI_ENABLED, &qdev->flags);
 			QPRINTK(qdev, IFUP, INFO,
@@ -2848,7 +2920,7 @@ msi:
 			return;
 		}
 	}
-	irq_type = LEG_IRQ;
+	qlge_irq_type = LEG_IRQ;
 	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
 }
 
@@ -3268,7 +3340,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 	ql_write32(qdev, FSC, mask | value);
 
 	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
-		min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
+		min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
 
 	/* Set RX packet routing to use port/pci function on which the
 	 * packet arrived on in addition to usual frame routing.
@@ -3276,6 +3348,22 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 	 * the same MAC address.
 	 */
 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+	/* Reroute all packets to our Interface.
+	 * They may have been routed to MPI firmware
+	 * due to WOL.
+	 */
+	value = ql_read32(qdev, MGMT_RCV_CFG);
+	value &= ~MGMT_RCV_CFG_RM;
+	mask = 0xffff0000;
+
+	/* Sticky reg needs clearing due to WOL. */
+	ql_write32(qdev, MGMT_RCV_CFG, mask);
+	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+
+	/* Default WOL is enabled on Mezz cards */
+	if (qdev->pdev->subsystem_device == 0x0068 ||
+			qdev->pdev->subsystem_device == 0x0180)
+		qdev->wol = WAKE_MAGIC;
 
 	/* Start up the rx queues. */
 	for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3310,10 +3398,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 
 	/* Initialize the port and set the max framesize. */
 	status = qdev->nic_ops->port_initialize(qdev);
-       if (status) {
-              QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
-              return status;
-       }
+	if (status)
+		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
 
 	/* Set up the MAC address and frame routing filter. */
 	status = ql_cam_route_initialize(qdev);
@@ -3392,6 +3478,52 @@ static void ql_display_dev_info(struct net_device *ndev)
 	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
 }
 
+int ql_wol(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 wol = MB_WOL_DISABLE;
+
+	/* The CAM is still intact after a reset, but if we
+	 * are doing WOL, then we may need to program the
+	 * routing regs. We would also need to issue the mailbox
+	 * commands to instruct the MPI what to do per the ethtool
+	 * settings.
+	 */
+
+	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+			WAKE_MCAST | WAKE_BCAST)) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
+			qdev->wol);
+		return -EINVAL;
+	}
+
+	if (qdev->wol & WAKE_MAGIC) {
+		status = ql_mb_wol_set_magic(qdev, 1);
+		if (status) {
+			QPRINTK(qdev, IFDOWN, ERR,
+				"Failed to set magic packet on %s.\n",
+				qdev->ndev->name);
+			return status;
+		} else
+			QPRINTK(qdev, DRV, INFO,
+				"Enabled magic packet successfully on %s.\n",
+				qdev->ndev->name);
+
+		wol |= MB_WOL_MAGIC_PKT;
+	}
+
+	if (qdev->wol) {
+		wol |= MB_WOL_MODE_ON;
+		status = ql_mb_wol_mode(qdev, wol);
+		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "Successfully set" : "Failed", wol,
+			qdev->ndev->name);
+	}
+
+	return status;
+}
+
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
 	int i, status = 0;
@@ -3497,6 +3629,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 	struct rx_ring *rx_ring;
 	struct tx_ring *tx_ring;
 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
 
 	/* In a perfect world we have one RSS ring for each CPU
 	 * and each has it's own vector.  To do that we ask for
@@ -3544,11 +3680,14 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
 			    rx_ring->lbq_len * sizeof(__le64);
-			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+			QPRINTK(qdev, IFUP, DEBUG,
+				"lbq_buf_size %d, order = %d\n",
+				rx_ring->lbq_buf_size, qdev->lbq_buf_order);
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
 			    rx_ring->sbq_len * sizeof(__le64);
-			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
 			rx_ring->type = RX_Q;
 		} else {
 			/*
@@ -3575,6 +3714,10 @@ static int qlge_open(struct net_device *ndev)
 	int err = 0;
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
+	err = ql_adapter_reset(qdev);
+	if (err)
+		return err;
+
 	err = ql_configure_rings(qdev);
 	if (err)
 		return err;
@@ -3594,14 +3737,63 @@ error_up:
 	return err;
 }
 
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+	struct rx_ring *rx_ring;
+	int i, status;
+	u32 lbq_buf_len;
+
+	/* Wait for an outstanding reset to complete. */
+	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+		int i = 3;
+		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+			QPRINTK(qdev, IFUP, ERR,
+				 "Waiting for adapter UP...\n");
+			ssleep(1);
+		}
+
+		if (!i) {
+			QPRINTK(qdev, IFUP, ERR,
+			 "Timed out waiting for adapter UP\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	/* Get the new rx buffer size. */
+	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+	for (i = 0; i < qdev->rss_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Set the new size. */
+		rx_ring->lbq_buf_size = lbq_buf_len;
+	}
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	return status;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device.\n");
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	dev_close(qdev->ndev);
+	return status;
+}
+
 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
 
 	if (ndev->mtu == 1500 && new_mtu == 9000) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
-		queue_delayed_work(qdev->workqueue,
-				&qdev->mpi_port_cfg_work, 0);
 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
 	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3609,15 +3801,60 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 		return 0;
 	} else
 		return -EINVAL;
+
+	queue_delayed_work(qdev->workqueue,
+			&qdev->mpi_port_cfg_work, 3*HZ);
+
+	if (!netif_running(qdev->ndev)) {
+		ndev->mtu = new_mtu;
+		return 0;
+	}
+
 	ndev->mtu = new_mtu;
-	return 0;
+	status = ql_change_rx_buffers(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Changing MTU failed.\n");
+	}
+
+	return status;
 }
 
 static struct net_device_stats *qlge_get_stats(struct net_device
 					       *ndev)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
-	return &qdev->stats;
+	struct rx_ring *rx_ring = &qdev->rx_ring[0];
+	struct tx_ring *tx_ring = &qdev->tx_ring[0];
+	unsigned long pkts, mcast, dropped, errors, bytes;
+	int i;
+
+	/* Get RX stats. */
+	pkts = mcast = dropped = errors = bytes = 0;
+	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
+		pkts += rx_ring->rx_packets;
+		bytes += rx_ring->rx_bytes;
+		dropped += rx_ring->rx_dropped;
+		errors += rx_ring->rx_errors;
+		mcast += rx_ring->rx_multicast;
+	}
+	ndev->stats.rx_packets = pkts;
+	ndev->stats.rx_bytes = bytes;
+	ndev->stats.rx_dropped = dropped;
+	ndev->stats.rx_errors = errors;
+	ndev->stats.multicast = mcast;
+
+	/* Get TX stats. */
+	pkts = errors = bytes = 0;
+	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
+		pkts += tx_ring->tx_packets;
+		bytes += tx_ring->tx_bytes;
+		errors += tx_ring->tx_errors;
+	}
+	ndev->stats.tx_packets = pkts;
+	ndev->stats.tx_bytes = bytes;
+	ndev->stats.tx_errors = errors;
+	return &ndev->stats;
 }
 
 static void qlge_set_multicast_list(struct net_device *ndev)
@@ -3714,9 +3951,6 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 	struct sockaddr *addr = p;
 	int status;
 
-	if (netif_running(ndev))
-		return -EBUSY;
-
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
@@ -3868,8 +4102,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 				    struct net_device *ndev, int cards_found)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
-	int pos, err = 0;
-	u16 val16;
+	int err = 0;
 
 	memset((void *)qdev, 0, sizeof(*qdev));
 	err = pci_enable_device(pdev);
@@ -3881,18 +4114,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	qdev->ndev = ndev;
 	qdev->pdev = pdev;
 	pci_set_drvdata(pdev, ndev);
-	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (pos <= 0) {
-		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
-			"aborting.\n");
-		return pos;
-	} else {
-		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
-		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
-		val16 |= (PCI_EXP_DEVCTL_CERE |
-			  PCI_EXP_DEVCTL_NFERE |
-			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
-		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+
+	/* Set PCIe read request size */
+	err = pcie_set_readrq(pdev, 4096);
+	if (err) {
+		dev_err(&pdev->dev, "Set readrq failed.\n");
+		goto err_out;
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
@@ -3991,7 +4218,6 @@ err_out:
 	return err;
 }
 
-
 static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_open		= qlge_open,
 	.ndo_stop		= qlge_close,
@@ -4002,9 +4228,9 @@ static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_vlan_rx_register	= ql_vlan_rx_register,
-	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
+	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
 };
 
 static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4060,10 +4286,21 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
 	}
 	ql_link_off(qdev);
 	ql_display_dev_info(ndev);
+	atomic_set(&qdev->lb_count, 0);
 	cards_found++;
 	return 0;
 }
 
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	return qlge_send(skb, ndev);
+}
+
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+	return ql_clean_inbound_rx_ring(rx_ring, budget);
+}
+
 static void __devexit qlge_remove(struct pci_dev *pdev)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
@@ -4193,6 +4430,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
 			return err;
 	}
 
+	ql_wol(qdev);
 	err = pci_save_state(pdev);
 	if (err)
 		return err;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index aec05f266107..e2b2286102d4 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,25 +1,5 @@
 #include "qlge.h"
 
-static void ql_display_mb_sts(struct ql_adapter *qdev,
-						struct mbox_params *mbcp)
-{
-	int i;
-	static char *err_sts[] = {
-		"Command Complete",
-		"Command Not Supported",
-		"Host Interface Error",
-		"Checksum Error",
-		"Unused Completion Status",
-		"Test Failed",
-		"Command Parameter Error"};
-
-	QPRINTK(qdev, DRV, DEBUG, "%s.\n",
-		err_sts[mbcp->mbox_out[0] & 0x0000000f]);
-	for (i = 0; i < mbcp->out_count; i++)
-		QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
-				i, mbcp->mbox_out[i]);
-}
-
 int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 {
 	int status;
@@ -317,6 +297,7 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
 	} else {
 		QPRINTK(qdev, DRV, ERR, "Firmware Revision  = 0x%.08x.\n",
 			mbcp->mbox_out[1]);
+		qdev->fw_rev_id = mbcp->mbox_out[1];
 		status = ql_cam_route_initialize(qdev);
 		if (status)
 			QPRINTK(qdev, IFUP, ERR,
@@ -446,6 +427,9 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
 		ql_aen_lost(qdev, mbcp);
 		break;
 
+	case AEN_DCBX_CHG:
+		/* Need to support AEN 8110 */
+		break;
 	default:
 		QPRINTK(qdev, DRV, ERR,
 			"Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
@@ -537,7 +521,6 @@ done:
 					MB_CMD_STS_GOOD) &&
 		((mbcp->mbox_out[0] & 0x0000f000) !=
 					MB_CMD_STS_INTRMDT)) {
-		ql_display_mb_sts(qdev, mbcp);
 		status = -EIO;
 	}
 end:
@@ -655,7 +638,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
  * for the current port.
  * Most likely will block.
  */
-static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 {
 	struct mbox_params mbc;
 	struct mbox_params *mbcp = &mbc;
@@ -690,7 +673,7 @@ static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
  * for the current port.
  * Most likely will block.
  */
-static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
 {
 	struct mbox_params mbc;
 	struct mbox_params *mbcp = &mbc;
@@ -720,6 +703,76 @@ static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
 	return status;
 }
 
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+	mbcp->mbox_in[1] = wol;
+
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set WOL mode.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+	u8 *addr = qdev->ndev->dev_addr;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 8;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
+	if (enable_wol) {
+		mbcp->mbox_in[1] = (u32)addr[0];
+		mbcp->mbox_in[2] = (u32)addr[1];
+		mbcp->mbox_in[3] = (u32)addr[2];
+		mbcp->mbox_in[4] = (u32)addr[3];
+		mbcp->mbox_in[5] = (u32)addr[4];
+		mbcp->mbox_in[6] = (u32)addr[5];
+		mbcp->mbox_in[7] = 0;
+	} else {
+		mbcp->mbox_in[1] = 0;
+		mbcp->mbox_in[2] = 1;
+		mbcp->mbox_in[3] = 1;
+		mbcp->mbox_in[4] = 1;
+		mbcp->mbox_in[5] = 1;
+		mbcp->mbox_in[6] = 1;
+		mbcp->mbox_in[7] = 0;
+	}
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set WOL magic packet.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
 /* IDC - Inter Device Communication...
  * Some firmware commands require consent of adjacent FCOE
  * function.  This function waits for the OK, or a
@@ -769,6 +822,61 @@ static int ql_idc_wait(struct ql_adapter *qdev)
 	return status;
 }
 
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+	mbcp->mbox_in[1] = led_config;
+
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set LED Configuration.\n");
+		status = -EIO;
+	}
+
+	return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 2;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to get LED Configuration.\n");
+		status = -EIO;
+	} else
+		qdev->led_config = mbcp->mbox_out[1];
+
+	return status;
+}
+
 int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
 {
 	struct mbox_params mbc;
@@ -930,8 +1038,11 @@ void ql_mpi_idc_work(struct work_struct *work)
 	int status;
 	struct mbox_params *mbcp = &qdev->idc_mbc;
 	u32 aen;
+	int timeout;
 
+	rtnl_lock();
 	aen = mbcp->mbox_out[1] >> 16;
+	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
 
 	switch (aen) {
 	default:
@@ -939,22 +1050,61 @@ void ql_mpi_idc_work(struct work_struct *work)
 			"Bug: Unhandled IDC action.\n");
 		break;
 	case MB_CMD_PORT_RESET:
-	case MB_CMD_SET_PORT_CFG:
 	case MB_CMD_STOP_FW:
 		ql_link_off(qdev);
+	case MB_CMD_SET_PORT_CFG:
 		/* Signal the resulting link up AEN
 		 * that the frame routing and mac addr
 		 * needs to be set.
 		 * */
 		set_bit(QL_CAM_RT_SET, &qdev->flags);
-		rtnl_lock();
-		status = ql_mb_idc_ack(qdev);
-		rtnl_unlock();
-		if (status) {
-			QPRINTK(qdev, DRV, ERR,
-			"Bug: No pending IDC!\n");
+		/* Do ACK if required */
+		if (timeout) {
+			status = ql_mb_idc_ack(qdev);
+			if (status)
+				QPRINTK(qdev, DRV, ERR,
+					"Bug: No pending IDC!\n");
+		} else {
+			QPRINTK(qdev, DRV, DEBUG,
+				    "IDC ACK not required\n");
+			status = 0; /* success */
 		}
+		break;
+
+	/* These sub-commands issued by another (FCoE)
+	 * function are requesting to do an operation
+	 * on the shared resource (MPI environment).
+	 * We currently don't issue these so we just
+	 * ACK the request.
+	 */
+	case MB_CMD_IOP_RESTART_MPI:
+	case MB_CMD_IOP_PREP_LINK_DOWN:
+		/* Drop the link, reload the routing
+		 * table when link comes up.
+		 */
+		ql_link_off(qdev);
+		set_bit(QL_CAM_RT_SET, &qdev->flags);
+		/* Fall through. */
+	case MB_CMD_IOP_DVR_START:
+	case MB_CMD_IOP_FLASH_ACC:
+	case MB_CMD_IOP_CORE_DUMP_MPI:
+	case MB_CMD_IOP_PREP_UPDATE_MPI:
+	case MB_CMD_IOP_COMP_UPDATE_MPI:
+	case MB_CMD_IOP_NONE:	/*  an IDC without params */
+		/* Do ACK if required */
+		if (timeout) {
+			status = ql_mb_idc_ack(qdev);
+			if (status)
+				QPRINTK(qdev, DRV, ERR,
+				    "Bug: No pending IDC!\n");
+		} else {
+			QPRINTK(qdev, DRV, DEBUG,
+			    "IDC ACK not required\n");
+			status = 0; /* success */
+		}
+		break;
 	}
+	rtnl_unlock();
 }
 
 void ql_mpi_work(struct work_struct *work)
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 8b14c6eda7c3..f03e2e4a15a8 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -842,7 +842,7 @@ static int r6040_open(struct net_device *dev)
 	int ret;
 
 	/* Request IRQ and Register interrupt handler */
-	ret = request_irq(dev->irq, &r6040_interrupt,
+	ret = request_irq(dev->irq, r6040_interrupt,
 		IRQF_SHARED, dev->name, dev);
 	if (ret)
 		return ret;
@@ -958,8 +958,7 @@ static void r6040_multicast_list(struct net_device *dev)
 	}
 	/* Too many multicast addresses
 	 * accept all traffic */
-	else if ((dev->mc_count > MCAST_MAX)
-		|| (dev->flags & IFF_ALLMULTI))
+	else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI))
 		reg |= 0x0020;
 
 	iowrite16(reg, ioaddr);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0fe2fc90f207..acfc5a3aa490 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -794,7 +794,7 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned int i;
-	static struct {
+	static const struct {
 		u32 opt;
 		u16 reg;
 		u8  mask;
@@ -1277,7 +1277,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 	 *
 	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
 	 */
-	const struct {
+	static const struct {
 		u32 mask;
 		u32 val;
 		int mac_version;
@@ -1351,7 +1351,7 @@ struct phy_reg {
 	u16 val;
 };
 
-static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
+static void rtl_phy_write(void __iomem *ioaddr, const struct phy_reg *regs, int len)
 {
 	while (len-- > 0) {
 		mdio_write(ioaddr, regs->reg, regs->val);
@@ -1361,7 +1361,7 @@ static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
 
 static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x06, 0x006e },
 		{ 0x08, 0x0708 },
@@ -1428,7 +1428,7 @@ static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0002 },
 		{ 0x01, 0x90d0 },
 		{ 0x1f, 0x0000 }
@@ -1457,7 +1457,7 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp,
 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
 				     void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x04, 0x0000 },
 		{ 0x03, 0x00a1 },
@@ -1504,7 +1504,7 @@ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
 
 static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x04, 0x0000 },
 		{ 0x03, 0x00a1 },
@@ -1557,7 +1557,7 @@ static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168bb_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x10, 0xf41b },
 		{ 0x1f, 0x0000 }
 	};
@@ -1570,7 +1570,7 @@ static void rtl8168bb_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x10, 0xf41b },
 		{ 0x1f, 0x0000 }
@@ -1581,7 +1581,7 @@ static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0000 },
 		{ 0x1d, 0x0f00 },
 		{ 0x1f, 0x0002 },
@@ -1594,7 +1594,7 @@ static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x1d, 0x3d98 },
 		{ 0x1f, 0x0000 }
@@ -1609,7 +1609,7 @@ static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x12, 0x2300 },
 		{ 0x1f, 0x0002 },
@@ -1638,7 +1638,7 @@ static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x12, 0x2300 },
 		{ 0x03, 0x802f },
@@ -1666,7 +1666,7 @@ static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x12, 0x2300 },
 		{ 0x1d, 0x3d98 },
@@ -1693,7 +1693,7 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 {
-	static struct phy_reg phy_reg_init_0[] = {
+	static const struct phy_reg phy_reg_init_0[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x06, 0x4064 },
 		{ 0x07, 0x2863 },
@@ -1712,14 +1712,14 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 		{ 0x1a, 0x05ad },
 		{ 0x14, 0x94c0 }
 	};
-	static struct phy_reg phy_reg_init_1[] = {
+	static const struct phy_reg phy_reg_init_1[] = {
 		{ 0x1f, 0x0002 },
 		{ 0x06, 0x5561 },
 		{ 0x1f, 0x0005 },
 		{ 0x05, 0x8332 },
 		{ 0x06, 0x5561 }
 	};
-	static struct phy_reg phy_reg_init_2[] = {
+	static const struct phy_reg phy_reg_init_2[] = {
 		{ 0x1f, 0x0005 },
 		{ 0x05, 0xffc2 },
 		{ 0x1f, 0x0005 },
@@ -2084,7 +2084,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 	rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
 
 	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
-		struct phy_reg phy_reg_init[] = {
+		static const struct phy_reg phy_reg_init[] = {
 			{ 0x1f, 0x0002 },
 			{ 0x05, 0x669a },
 			{ 0x1f, 0x0005 },
@@ -2099,7 +2099,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 		val = mdio_read(ioaddr, 0x0d);
 
 		if ((val & 0x00ff) != 0x006c) {
-			u32 set[] = {
+			static const u32 set[] = {
 				0x0065, 0x0066, 0x0067, 0x0068,
 				0x0069, 0x006a, 0x006b, 0x006c
 			};
@@ -2112,7 +2112,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 				mdio_write(ioaddr, 0x0d, val | set[i]);
 		}
 	} else {
-		struct phy_reg phy_reg_init[] = {
+		static const struct phy_reg phy_reg_init[] = {
 			{ 0x1f, 0x0002 },
 			{ 0x05, 0x6662 },
 			{ 0x1f, 0x0005 },
@@ -2136,7 +2136,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
 {
-	static struct phy_reg phy_reg_init_0[] = {
+	static const struct phy_reg phy_reg_init_0[] = {
 		{ 0x1f, 0x0001 },
 		{ 0x06, 0x4064 },
 		{ 0x07, 0x2863 },
@@ -2161,7 +2161,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
 		{ 0x05, 0x8332 },
 		{ 0x06, 0x5561 }
 	};
-	static struct phy_reg phy_reg_init_1[] = {
+	static const struct phy_reg phy_reg_init_1[] = {
 		{ 0x1f, 0x0005 },
 		{ 0x05, 0xffc2 },
 		{ 0x1f, 0x0005 },
@@ -2477,7 +2477,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
 	rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
 	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
-		struct phy_reg phy_reg_init[] = {
+		static const struct phy_reg phy_reg_init[] = {
 			{ 0x1f, 0x0002 },
 			{ 0x05, 0x669a },
 			{ 0x1f, 0x0005 },
@@ -2505,7 +2505,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
 				mdio_write(ioaddr, 0x0d, val | set[i]);
 		}
 	} else {
-		struct phy_reg phy_reg_init[] = {
+		static const struct phy_reg phy_reg_init[] = {
 			{ 0x1f, 0x0002 },
 			{ 0x05, 0x2642 },
 			{ 0x1f, 0x0005 },
@@ -2531,7 +2531,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0002 },
 		{ 0x10, 0x0008 },
 		{ 0x0d, 0x006c },
@@ -2592,7 +2592,7 @@ static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
 
 static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init[] = {
+	static const struct phy_reg phy_reg_init[] = {
 		{ 0x1f, 0x0003 },
 		{ 0x08, 0x441d },
 		{ 0x01, 0x9100 },
@@ -3388,7 +3388,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 {
-	struct {
+	static const struct {
 		u32 mac_version;
 		u32 clk;
 		u32 val;
@@ -3512,7 +3512,7 @@ struct ephy_info {
 	u16 bits;
 };
 
-static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
+static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
 {
 	u16 w;
 
@@ -3583,7 +3583,7 @@ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
 
 static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
-	static struct ephy_info e_info_8168cp[] = {
+	static const struct ephy_info e_info_8168cp[] = {
 		{ 0x01, 0,	0x0001 },
 		{ 0x02, 0x0800,	0x1000 },
 		{ 0x03, 0,	0x0042 },
@@ -3627,7 +3627,7 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
 
 static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
-	static struct ephy_info e_info_8168c_1[] = {
+	static const struct ephy_info e_info_8168c_1[] = {
 		{ 0x02, 0x0800,	0x1000 },
 		{ 0x03, 0,	0x0002 },
 		{ 0x06, 0x0080,	0x0000 }
@@ -3644,7 +3644,7 @@ static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
 
 static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
 {
-	static struct ephy_info e_info_8168c_2[] = {
+	static const struct ephy_info e_info_8168c_2[] = {
 		{ 0x01, 0,	0x0001 },
 		{ 0x03, 0x0400,	0x0220 }
 	};
@@ -3787,7 +3787,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
 
 static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
-	static struct ephy_info e_info_8102e_1[] = {
+	static const struct ephy_info e_info_8102e_1[] = {
 		{ 0x01,	0, 0x6e65 },
 		{ 0x02,	0, 0x091f },
 		{ 0x03,	0, 0xc2f9 },
@@ -4447,13 +4447,12 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
 	if (pkt_size >= rx_copybreak)
 		goto out;
 
-	skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 	if (!skb)
 		goto out;
 
 	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
 				    PCI_DMA_FROMDEVICE);
-	skb_reserve(skb, NET_IP_ALIGN);
 	skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
 	*sk_buff = skb;
 	done = true;
@@ -4764,8 +4763,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to filter perfectly -- accept all multicasts. */
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 4525cbe8dd69..45f26344b368 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -373,9 +373,9 @@ struct s6gmac {
 static void s6gmac_rx_fillfifo(struct s6gmac *pd)
 {
 	struct sk_buff *skb;
-	while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB)
-			&& (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan))
-			&& (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+	while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
+	       (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
+	       (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
 		pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
 		s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
 			pd->io, (u32)skb->data, S6_MAX_FRLEN);
@@ -984,7 +984,7 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
 	pd->rx_dma = DMA_MASK_DMAC(i);
 	pd->rx_chan = DMA_INDEX_CHNL(i);
 	pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
-	res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev);
+	res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
 	if (res) {
 		printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
 		goto errirq;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index c9c70ab0cce0..9f83a1197375 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -973,7 +973,7 @@ sb1000_open(struct net_device *dev)
 	lp->rx_frame_id[1] = 0;
 	lp->rx_frame_id[2] = 0;
 	lp->rx_frame_id[3] = 0;
-	if (request_irq(dev->irq, &sb1000_interrupt, 0, "sb1000", dev)) {
+	if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
 		return -EAGAIN;
 	}
 
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 508551f1b3fc..564d4d7f855b 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1476,7 +1476,6 @@ static void sbmac_channel_start(struct sbmac_softc *s)
 		V_MAC_TX_RL_THRSH(4) |
 		V_MAC_RX_PL_THRSH(4) |
 		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
-		V_MAC_RX_PL_THRSH(4) |
 		V_MAC_RX_RL_THRSH(8) |
 		0;
 
@@ -2411,7 +2410,7 @@ static int sbmac_open(struct net_device *dev)
 	 */
 
 	__raw_readq(sc->sbm_isr);
-	err = request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev);
+	err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
 	if (err) {
 		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
 		       dev->irq);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8d6030022d14..e35050322f97 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -428,9 +428,9 @@ static void _sc92031_set_mar(struct net_device *dev)
 	void __iomem *port_base = priv->port_base;
 	u32 mar0 = 0, mar1 = 0;
 
-	if ((dev->flags & IFF_PROMISC)
-			|| dev->mc_count > multicast_filter_limit
-			|| (dev->flags & IFF_ALLMULTI))
+	if ((dev->flags & IFF_PROMISC) ||
+	    dev->mc_count > multicast_filter_limit ||
+	    (dev->flags & IFF_ALLMULTI))
 		mar0 = mar1 = 0xffffffff;
 	else if (dev->flags & IFF_MULTICAST) {
 		struct dev_mc_list *mc_list;
@@ -777,10 +777,10 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
 
 		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
 
-		if (unlikely(rx_status == 0
-				|| rx_size > (MAX_ETH_FRAME_SIZE + 4)
-				|| rx_size < 16
-				|| !(rx_status & RxStatesOK))) {
+		if (unlikely(rx_status == 0 ||
+			     rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
+			     rx_size < 16 ||
+			     !(rx_status & RxStatesOK))) {
 			_sc92031_rx_tasklet_error(dev, rx_status, rx_size);
 			break;
 		}
@@ -793,7 +793,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
 
 		rx_len -= rx_size_align + 4;
 
-		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 		if (unlikely(!skb)) {
 			if (printk_ratelimit())
 				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -801,8 +801,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
 			goto next;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
 			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
 				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 39246d457ac2..fe806bd9b95f 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -335,7 +335,7 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
 
 #if 0
 	{
-		 int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev);
+		 int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
 		 if (irqval) {
 			 printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
 					 dev->irq, irqval);
@@ -367,7 +367,7 @@ static int seeq8005_open(struct net_device *dev)
 	struct net_local *lp = netdev_priv(dev);
 
 	{
-		 int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev);
+		 int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
 		 if (irqval) {
 			 printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
 					 dev->irq, irqval);
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index 260aafaac235..a65c98638398 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -1,5 +1,5 @@
 config SFC
-	tristate "Solarflare Solarstorm SFC4000 support"
+	tristate "Solarflare Solarstorm SFC4000/SFC9000-family support"
 	depends on PCI && INET
 	select MDIO
 	select CRC32
@@ -7,15 +7,16 @@ config SFC
 	select I2C_ALGOBIT
 	help
 	  This driver supports 10-gigabit Ethernet cards based on
-	  the Solarflare Communications Solarstorm SFC4000 controller.
+	  the Solarflare Communications Solarstorm SFC4000 and
+	  SFC9000-family controllers.
 
 	  To compile this driver as a module, choose M here.  The module
 	  will be called sfc.
 config SFC_MTD
-	bool "Solarflare Solarstorm SFC4000 flash MTD support"
+	bool "Solarflare Solarstorm SFC4000/SFC9000-family MTD support"
 	depends on SFC && MTD && !(SFC=y && MTD=m)
 	default y
 	help
-	  This exposes the on-board flash memory as an MTD device (e.g.
-          /dev/mtd1).  This makes it possible to upload new boot code
-          to the NIC.
+	  This exposes the on-board flash memory as MTD devices (e.g.
+	  /dev/mtd1).  This makes it possible to upload new firmware
+	  to the NIC.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be3cb13..1047b19c60a5 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,7 @@
-sfc-y			+= efx.o falcon.o tx.o rx.o falcon_gmac.o \
-			   falcon_xmac.o selftest.o ethtool.o xfp_phy.o \
-			   mdio_10g.o tenxpress.o boards.o sfe4001.o
+sfc-y			+= efx.o nic.o falcon.o siena.o tx.o rx.o \
+			   falcon_gmac.o falcon_xmac.o mcdi_mac.o \
+			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+			   tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c267b9..098ac2ad757d 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -37,6 +37,8 @@
 #define EFX_DWORD_2_WIDTH 32
 #define EFX_DWORD_3_LBN 96
 #define EFX_DWORD_3_WIDTH 32
+#define EFX_QWORD_0_LBN 0
+#define EFX_QWORD_0_WIDTH 64
 
 /* Specified attribute (e.g. LBN) of the specified field */
 #define EFX_VAL(field, attribute) field ## _ ## attribute
@@ -520,19 +522,6 @@ typedef union efx_oword {
 #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
 #endif
 
-#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
-	if (falcon_rev(efx) >= FALCON_REV_B0) {			   \
-		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
-	} else { \
-		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
-	} \
-} while (0)
-
-#define EFX_QWORD_FIELD_VER(efx, qword, field)	\
-	(falcon_rev(efx) >= FALCON_REV_B0 ?	\
-	 EFX_QWORD_FIELD((qword), field##_B0) :	\
-	 EFX_QWORD_FIELD((qword), field##_A1))
-
 /* Used to avoid compiler warnings about shift range exceeding width
  * of the data types when dma_addr_t is only 32 bits wide.
  */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c891b7..000000000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include "net_driver.h"
-#include "phy.h"
-#include "boards.h"
-#include "efx.h"
-#include "workarounds.h"
-
-/* Macros for unpacking the board revision */
-/* The revision info is in host byte order. */
-#define BOARD_TYPE(_rev) (_rev >> 8)
-#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
-#define BOARD_MINOR(_rev) (_rev & 0xf)
-
-/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
-#define BLINK_INTERVAL (HZ/2)
-
-static void blink_led_timer(unsigned long context)
-{
-	struct efx_nic *efx = (struct efx_nic *)context;
-	struct efx_blinker *bl = &efx->board_info.blinker;
-	efx->board_info.set_id_led(efx, bl->state);
-	bl->state = !bl->state;
-	if (bl->resubmit)
-		mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
-}
-
-static void board_blink(struct efx_nic *efx, bool blink)
-{
-	struct efx_blinker *blinker = &efx->board_info.blinker;
-
-	/* The rtnl mutex serialises all ethtool ioctls, so
-	 * nothing special needs doing here. */
-	if (blink) {
-		blinker->resubmit = true;
-		blinker->state = false;
-		setup_timer(&blinker->timer, blink_led_timer,
-			    (unsigned long)efx);
-		mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
-	} else {
-		blinker->resubmit = false;
-		if (blinker->timer.function)
-			del_timer_sync(&blinker->timer);
-		efx->board_info.init_leds(efx);
-	}
-}
-
-/*****************************************************************************
- * Support for LM87 sensor chip used on several boards
- */
-#define LM87_REG_ALARMS1		0x41
-#define LM87_REG_ALARMS2		0x42
-#define LM87_IN_LIMITS(nr, _min, _max)			\
-	0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
-#define LM87_AIN_LIMITS(nr, _min, _max)			\
-	0x3B + (nr), _max, 0x1A + (nr), _min
-#define LM87_TEMP_INT_LIMITS(_min, _max)		\
-	0x39, _max, 0x3A, _min
-#define LM87_TEMP_EXT1_LIMITS(_min, _max)		\
-	0x37, _max, 0x38, _min
-
-#define LM87_ALARM_TEMP_INT		0x10
-#define LM87_ALARM_TEMP_EXT1		0x20
-
-#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
-
-static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
-			 const u8 *reg_values)
-{
-	struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
-	int rc;
-
-	if (!client)
-		return -EIO;
-
-	while (*reg_values) {
-		u8 reg = *reg_values++;
-		u8 value = *reg_values++;
-		rc = i2c_smbus_write_byte_data(client, reg, value);
-		if (rc)
-			goto err;
-	}
-
-	efx->board_info.hwmon_client = client;
-	return 0;
-
-err:
-	i2c_unregister_device(client);
-	return rc;
-}
-
-static void efx_fini_lm87(struct efx_nic *efx)
-{
-	i2c_unregister_device(efx->board_info.hwmon_client);
-}
-
-static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
-{
-	struct i2c_client *client = efx->board_info.hwmon_client;
-	s32 alarms1, alarms2;
-
-	/* If link is up then do not monitor temperature */
-	if (EFX_WORKAROUND_7884(efx) && efx->link_up)
-		return 0;
-
-	alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
-	alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
-	if (alarms1 < 0)
-		return alarms1;
-	if (alarms2 < 0)
-		return alarms2;
-	alarms1 &= mask;
-	alarms2 &= mask >> 8;
-	if (alarms1 || alarms2) {
-		EFX_ERR(efx,
-			"LM87 detected a hardware failure (status %02x:%02x)"
-			"%s%s\n",
-			alarms1, alarms2,
-			(alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
-			(alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
-		return -ERANGE;
-	}
-
-	return 0;
-}
-
-#else /* !CONFIG_SENSORS_LM87 */
-
-static inline int
-efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
-	      const u8 *reg_values)
-{
-	return 0;
-}
-static inline void efx_fini_lm87(struct efx_nic *efx)
-{
-}
-static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
-{
-	return 0;
-}
-
-#endif /* CONFIG_SENSORS_LM87 */
-
-/*****************************************************************************
- * Support for the SFE4002
- *
- */
-static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
-
-static const u8 sfe4002_lm87_regs[] = {
-	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
-	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
-	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
-	LM87_IN_LIMITS(3, 0xb0, 0xc9),		/* 5V:    4.6-5.2V */
-	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
-	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
-	LM87_AIN_LIMITS(0, 0xa0, 0xb2),		/* AIN1:  1.66V +/- 5% */
-	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
-	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
-	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
-	0
-};
-
-static struct i2c_board_info sfe4002_hwmon_info = {
-	I2C_BOARD_INFO("lm87", 0x2e),
-	.platform_data	= &sfe4002_lm87_channel,
-};
-
-/****************************************************************************/
-/* LED allocations. Note that on rev A0 boards the schematic and the reality
- * differ: red and green are swapped. Below is the fixed (A1) layout (there
- * are only 3 A0 boards in existence, so no real reason to make this
- * conditional).
- */
-#define SFE4002_FAULT_LED (2)	/* Red */
-#define SFE4002_RX_LED    (0)	/* Green */
-#define SFE4002_TX_LED    (1)	/* Amber */
-
-static void sfe4002_init_leds(struct efx_nic *efx)
-{
-	/* Set the TX and RX LEDs to reflect status and activity, and the
-	 * fault LED off */
-	xfp_set_led(efx, SFE4002_TX_LED,
-		    QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
-	xfp_set_led(efx, SFE4002_RX_LED,
-		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
-	xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
-}
-
-static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
-{
-	xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
-			QUAKE_LED_OFF);
-}
-
-static int sfe4002_check_hw(struct efx_nic *efx)
-{
-	/* A0 board rev. 4002s report a temperature fault the whole time
-	 * (bad sensor) so we mask it out. */
-	unsigned alarm_mask =
-		(efx->board_info.major == 0 && efx->board_info.minor == 0) ?
-		~LM87_ALARM_TEMP_EXT1 : ~0;
-
-	return efx_check_lm87(efx, alarm_mask);
-}
-
-static int sfe4002_init(struct efx_nic *efx)
-{
-	int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
-	if (rc)
-		return rc;
-	efx->board_info.monitor = sfe4002_check_hw;
-	efx->board_info.init_leds = sfe4002_init_leds;
-	efx->board_info.set_id_led = sfe4002_set_id_led;
-	efx->board_info.blink = board_blink;
-	efx->board_info.fini = efx_fini_lm87;
-	return 0;
-}
-
-/*****************************************************************************
- * Support for the SFN4112F
- *
- */
-static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
-
-static const u8 sfn4112f_lm87_regs[] = {
-	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
-	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
-	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
-	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
-	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
-	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
-	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
-	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
-	0
-};
-
-static struct i2c_board_info sfn4112f_hwmon_info = {
-	I2C_BOARD_INFO("lm87", 0x2e),
-	.platform_data	= &sfn4112f_lm87_channel,
-};
-
-#define SFN4112F_ACT_LED	0
-#define SFN4112F_LINK_LED	1
-
-static void sfn4112f_init_leds(struct efx_nic *efx)
-{
-	xfp_set_led(efx, SFN4112F_ACT_LED,
-		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
-	xfp_set_led(efx, SFN4112F_LINK_LED,
-		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
-}
-
-static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
-{
-	xfp_set_led(efx, SFN4112F_LINK_LED,
-		    state ? QUAKE_LED_ON : QUAKE_LED_OFF);
-}
-
-static int sfn4112f_check_hw(struct efx_nic *efx)
-{
-	/* Mask out unused sensors */
-	return efx_check_lm87(efx, ~0x48);
-}
-
-static int sfn4112f_init(struct efx_nic *efx)
-{
-	int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
-	if (rc)
-		return rc;
-	efx->board_info.monitor = sfn4112f_check_hw;
-	efx->board_info.init_leds = sfn4112f_init_leds;
-	efx->board_info.set_id_led = sfn4112f_set_id_led;
-	efx->board_info.blink = board_blink;
-	efx->board_info.fini = efx_fini_lm87;
-	return 0;
-}
-
-/* This will get expanded as board-specific details get moved out of the
- * PHY drivers. */
-struct efx_board_data {
-	enum efx_board_type type;
-	const char *ref_model;
-	const char *gen_type;
-	int (*init) (struct efx_nic *nic);
-};
-
-
-static struct efx_board_data board_data[] = {
-	{ EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
-	{ EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
-	{ EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
-	  sfn4111t_init },
-	{ EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
-	  sfn4112f_init },
-};
-
-void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
-{
-	struct efx_board_data *data = NULL;
-	int i;
-
-	efx->board_info.type = BOARD_TYPE(revision_info);
-	efx->board_info.major = BOARD_MAJOR(revision_info);
-	efx->board_info.minor = BOARD_MINOR(revision_info);
-
-	for (i = 0; i < ARRAY_SIZE(board_data); i++)
-		if (board_data[i].type == efx->board_info.type)
-			data = &board_data[i];
-
-	if (data) {
-		EFX_INFO(efx, "board is %s rev %c%d\n",
-			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
-			 ? data->ref_model : data->gen_type,
-			 'A' + efx->board_info.major, efx->board_info.minor);
-		efx->board_info.init = data->init;
-	} else {
-		EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
-	}
-}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de0e080..000000000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_BOARDS_H
-#define EFX_BOARDS_H
-
-/* Board IDs (must fit in 8 bits) */
-enum efx_board_type {
-	EFX_BOARD_SFE4001 = 1,
-	EFX_BOARD_SFE4002 = 2,
-	EFX_BOARD_SFN4111T = 0x51,
-	EFX_BOARD_SFN4112F = 0x52,
-};
-
-extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
-
-/* SFE4001 (10GBASE-T) */
-extern int sfe4001_init(struct efx_nic *efx);
-/* SFN4111T (100/1000/10GBASE-T) */
-extern int sfn4111t_init(struct efx_nic *efx);
-
-#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..f983e3b507cc 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2008 Solarflare Communications Inc.
+ * Copyright 2005-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -21,12 +21,73 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include "net_driver.h"
-#include "ethtool.h"
-#include "tx.h"
-#include "rx.h"
 #include "efx.h"
 #include "mdio_10g.h"
-#include "falcon.h"
+#include "nic.h"
+
+#include "mcdi.h"
+
+/**************************************************************************
+ *
+ * Type name strings
+ *
+ **************************************************************************
+ */
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *efx_loopback_mode_names[] = {
+	[LOOPBACK_NONE]		= "NONE",
+	[LOOPBACK_DATA]		= "DATAPATH",
+	[LOOPBACK_GMAC]		= "GMAC",
+	[LOOPBACK_XGMII]	= "XGMII",
+	[LOOPBACK_XGXS]		= "XGXS",
+	[LOOPBACK_XAUI]  	= "XAUI",
+	[LOOPBACK_GMII] 	= "GMII",
+	[LOOPBACK_SGMII] 	= "SGMII",
+	[LOOPBACK_XGBR]		= "XGBR",
+	[LOOPBACK_XFI]		= "XFI",
+	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
+	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
+	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
+	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
+	[LOOPBACK_GPHY]		= "GPHY",
+	[LOOPBACK_PHYXS]	= "PHYXS",
+	[LOOPBACK_PCS]	 	= "PCS",
+	[LOOPBACK_PMAPMD] 	= "PMA/PMD",
+	[LOOPBACK_XPORT]	= "XPORT",
+	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
+	[LOOPBACK_XAUI_WS]  	= "XAUI_WS",
+	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
+	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+	[LOOPBACK_GMII_WS] 	= "GMII_WS",
+	[LOOPBACK_XFI_WS]	= "XFI_WS",
+	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
+	[LOOPBACK_PHYXS_WS]  	= "PHYXS_WS",
+};
+
+/* Interrupt mode names (see INT_MODE())) */
+const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
+const char *efx_interrupt_mode_names[] = {
+	[EFX_INT_MODE_MSIX]   = "MSI-X",
+	[EFX_INT_MODE_MSI]    = "MSI",
+	[EFX_INT_MODE_LEGACY] = "legacy",
+};
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *efx_reset_type_names[] = {
+	[RESET_TYPE_INVISIBLE]     = "INVISIBLE",
+	[RESET_TYPE_ALL]           = "ALL",
+	[RESET_TYPE_WORLD]         = "WORLD",
+	[RESET_TYPE_DISABLE]       = "DISABLE",
+	[RESET_TYPE_TX_WATCHDOG]   = "TX_WATCHDOG",
+	[RESET_TYPE_INT_ERROR]     = "INT_ERROR",
+	[RESET_TYPE_RX_RECOVERY]   = "RX_RECOVERY",
+	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+	[RESET_TYPE_TX_SKIP]       = "TX_SKIP",
+	[RESET_TYPE_MC_FAILURE]    = "MC_FAILURE",
+};
 
 #define EFX_MAX_MTU (9 * 1024)
 
@@ -145,7 +206,8 @@ static void efx_fini_channels(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
-		if (efx->state == STATE_RUNNING)	\
+		if ((efx->state == STATE_RUNNING) ||	\
+		    (efx->state == STATE_DISABLED))	\
 			ASSERT_RTNL();			\
 	} while (0)
 
@@ -171,7 +233,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 		     !channel->enabled))
 		return 0;
 
-	rx_packets = falcon_process_eventq(channel, rx_quota);
+	rx_packets = efx_nic_process_eventq(channel, rx_quota);
 	if (rx_packets == 0)
 		return 0;
 
@@ -203,7 +265,7 @@ static inline void efx_channel_processed(struct efx_channel *channel)
 	channel->work_pending = false;
 	smp_wmb();
 
-	falcon_eventq_read_ack(channel);
+	efx_nic_eventq_read_ack(channel);
 }
 
 /* NAPI poll handler
@@ -228,26 +290,20 @@ static int efx_poll(struct napi_struct *napi, int budget)
 		if (channel->used_flags & EFX_USED_BY_RX &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
-			unsigned old_irq_moderation = channel->irq_moderation;
-
 			if (unlikely(channel->irq_mod_score <
 				     irq_adapt_low_thresh)) {
-				channel->irq_moderation =
-					max_t(int,
-					      channel->irq_moderation -
-					      FALCON_IRQ_MOD_RESOLUTION,
-					      FALCON_IRQ_MOD_RESOLUTION);
+				if (channel->irq_moderation > 1) {
+					channel->irq_moderation -= 1;
+					efx->type->push_irq_moderation(channel);
+				}
 			} else if (unlikely(channel->irq_mod_score >
 					    irq_adapt_high_thresh)) {
-				channel->irq_moderation =
-					min(channel->irq_moderation +
-					    FALCON_IRQ_MOD_RESOLUTION,
-					    efx->irq_rx_moderation);
+				if (channel->irq_moderation <
+				    efx->irq_rx_moderation) {
+					channel->irq_moderation += 1;
+					efx->type->push_irq_moderation(channel);
+				}
 			}
-
-			if (channel->irq_moderation != old_irq_moderation)
-				falcon_set_int_moderation(channel);
-
 			channel->irq_count = 0;
 			channel->irq_mod_score = 0;
 		}
@@ -280,7 +336,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	BUG_ON(!channel->enabled);
 
 	/* Disable interrupts and wait for ISRs to complete */
-	falcon_disable_interrupts(efx);
+	efx_nic_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
 	if (channel->irq)
@@ -290,14 +346,14 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	efx_process_channel(channel, efx->type->evq_size);
+	efx_process_channel(channel, EFX_EVQ_SIZE);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
 	efx_channel_processed(channel);
 
 	napi_enable(&channel->napi_str);
-	falcon_enable_interrupts(efx);
+	efx_nic_enable_interrupts(efx);
 }
 
 /* Create event queue
@@ -309,7 +365,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
 
-	return falcon_probe_eventq(channel);
+	return efx_nic_probe_eventq(channel);
 }
 
 /* Prepare channel's event queue */
@@ -319,21 +375,21 @@ static void efx_init_eventq(struct efx_channel *channel)
 
 	channel->eventq_read_ptr = 0;
 
-	falcon_init_eventq(channel);
+	efx_nic_init_eventq(channel);
 }
 
 static void efx_fini_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
 
-	falcon_fini_eventq(channel);
+	efx_nic_fini_eventq(channel);
 }
 
 static void efx_remove_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
 
-	falcon_remove_eventq(channel);
+	efx_nic_remove_eventq(channel);
 }
 
 /**************************************************************************
@@ -499,7 +555,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 	EFX_ASSERT_RESET_SERIALISED(efx);
 	BUG_ON(efx->port_enabled);
 
-	rc = falcon_flush_queues(efx);
+	rc = efx_nic_flush_queues(efx);
 	if (rc)
 		EFX_ERR(efx, "failed to flush queues\n");
 	else
@@ -547,8 +603,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
  * netif_carrier_on/off) of the link status, and also maintains the
  * link status's stop on the port's TX queue.
  */
-static void efx_link_status_changed(struct efx_nic *efx)
+void efx_link_status_changed(struct efx_nic *efx)
 {
+	struct efx_link_state *link_state = &efx->link_state;
+
 	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
 	 * that no events are triggered between unregister_netdev() and the
 	 * driver unloading. A more general condition is that NETDEV_CHANGE
@@ -561,19 +619,19 @@ static void efx_link_status_changed(struct efx_nic *efx)
 		return;
 	}
 
-	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
+	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 		efx->n_link_state_changes++;
 
-		if (efx->link_up)
+		if (link_state->up)
 			netif_carrier_on(efx->net_dev);
 		else
 			netif_carrier_off(efx->net_dev);
 	}
 
 	/* Status message for kernel log */
-	if (efx->link_up) {
+	if (link_state->up) {
 		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
-			 efx->link_speed, efx->link_fd ? "full" : "half",
+			 link_state->speed, link_state->fd ? "full" : "half",
 			 efx->net_dev->mtu,
 			 (efx->promiscuous ? " [PROMISC]" : ""));
 	} else {
@@ -582,16 +640,49 @@ static void efx_link_status_changed(struct efx_nic *efx)
 
 }
 
+void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+{
+	efx->link_advertising = advertising;
+	if (advertising) {
+		if (advertising & ADVERTISED_Pause)
+			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+		else
+			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+		if (advertising & ADVERTISED_Asym_Pause)
+			efx->wanted_fc ^= EFX_FC_TX;
+	}
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc)
+{
+	efx->wanted_fc = wanted_fc;
+	if (efx->link_advertising) {
+		if (wanted_fc & EFX_FC_RX)
+			efx->link_advertising |= (ADVERTISED_Pause |
+						  ADVERTISED_Asym_Pause);
+		else
+			efx->link_advertising &= ~(ADVERTISED_Pause |
+						   ADVERTISED_Asym_Pause);
+		if (wanted_fc & EFX_FC_TX)
+			efx->link_advertising ^= ADVERTISED_Asym_Pause;
+	}
+}
+
 static void efx_fini_port(struct efx_nic *efx);
 
-/* This call reinitialises the MAC to pick up new PHY settings. The
- * caller must hold the mac_lock */
-void __efx_reconfigure_port(struct efx_nic *efx)
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
 {
-	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+	enum efx_phy_mode phy_mode;
+	int rc;
 
-	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
-		raw_smp_processor_id());
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 
 	/* Serialise the promiscuous flag with efx_set_multicast_list. */
 	if (efx_dev_registered(efx)) {
@@ -599,61 +690,48 @@ void __efx_reconfigure_port(struct efx_nic *efx)
 		netif_addr_unlock_bh(efx->net_dev);
 	}
 
-	falcon_deconfigure_mac_wrapper(efx);
-
-	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
+	/* Disable PHY transmit in mac level loopbacks */
+	phy_mode = efx->phy_mode;
 	if (LOOPBACK_INTERNAL(efx))
 		efx->phy_mode |= PHY_MODE_TX_DISABLED;
 	else
 		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
-	efx->phy_op->reconfigure(efx);
 
-	if (falcon_switch_mac(efx))
-		goto fail;
+	rc = efx->type->reconfigure_port(efx);
 
-	efx->mac_op->reconfigure(efx);
-
-	/* Inform kernel of loss/gain of carrier */
-	efx_link_status_changed(efx);
-	return;
+	if (rc)
+		efx->phy_mode = phy_mode;
 
-fail:
-	EFX_ERR(efx, "failed to reconfigure MAC\n");
-	efx->port_enabled = false;
-	efx_fini_port(efx);
+	return rc;
 }
 
 /* Reinitialise the MAC to pick up new PHY settings, even if the port is
  * disabled. */
-void efx_reconfigure_port(struct efx_nic *efx)
+int efx_reconfigure_port(struct efx_nic *efx)
 {
+	int rc;
+
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	mutex_lock(&efx->mac_lock);
-	__efx_reconfigure_port(efx);
+	rc = __efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
-}
-
-/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
- * we don't efx_reconfigure_port() if the port is disabled. Care is taken
- * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
-static void efx_phy_work(struct work_struct *data)
-{
-	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);
 
-	mutex_lock(&efx->mac_lock);
-	if (efx->port_enabled)
-		__efx_reconfigure_port(efx);
-	mutex_unlock(&efx->mac_lock);
+	return rc;
 }
 
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly. */
 static void efx_mac_work(struct work_struct *data)
 {
 	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
 
 	mutex_lock(&efx->mac_lock);
-	if (efx->port_enabled)
-		efx->mac_op->irq(efx);
+	if (efx->port_enabled) {
+		efx->type->push_multicast_hash(efx);
+		efx->mac_op->reconfigure(efx);
+	}
 	mutex_unlock(&efx->mac_lock);
 }
 
@@ -663,8 +741,8 @@ static int efx_probe_port(struct efx_nic *efx)
 
 	EFX_LOG(efx, "create port\n");
 
-	/* Connect up MAC/PHY operations table and read MAC address */
-	rc = falcon_probe_port(efx);
+	/* Connect up MAC/PHY operations table */
+	rc = efx->type->probe_port(efx);
 	if (rc)
 		goto err;
 
@@ -699,29 +777,33 @@ static int efx_init_port(struct efx_nic *efx)
 
 	EFX_LOG(efx, "init port\n");
 
-	rc = efx->phy_op->init(efx);
-	if (rc)
-		return rc;
 	mutex_lock(&efx->mac_lock);
-	efx->phy_op->reconfigure(efx);
-	rc = falcon_switch_mac(efx);
-	mutex_unlock(&efx->mac_lock);
+
+	rc = efx->phy_op->init(efx);
 	if (rc)
-		goto fail;
-	efx->mac_op->reconfigure(efx);
+		goto fail1;
 
 	efx->port_initialized = true;
-	efx_stats_enable(efx);
+
+	/* Reconfigure the MAC before creating dma queues (required for
+	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
+	efx->mac_op->reconfigure(efx);
+
+	/* Ensure the PHY advertises the correct flow control settings */
+	rc = efx->phy_op->reconfigure(efx);
+	if (rc)
+		goto fail2;
+
+	mutex_unlock(&efx->mac_lock);
 	return 0;
 
-fail:
+fail2:
 	efx->phy_op->fini(efx);
+fail1:
+	mutex_unlock(&efx->mac_lock);
 	return rc;
 }
 
-/* Allow efx_reconfigure_port() to be scheduled, and close the window
- * between efx_stop_port and efx_flush_all whereby a previously scheduled
- * efx_phy_work()/efx_mac_work() may have been cancelled */
 static void efx_start_port(struct efx_nic *efx)
 {
 	EFX_LOG(efx, "start port\n");
@@ -729,15 +811,16 @@ static void efx_start_port(struct efx_nic *efx)
 
 	mutex_lock(&efx->mac_lock);
 	efx->port_enabled = true;
-	__efx_reconfigure_port(efx);
-	efx->mac_op->irq(efx);
+
+	/* efx_mac_work() might have been scheduled after efx_stop_port(),
+	 * and then cancelled by efx_flush_all() */
+	efx->type->push_multicast_hash(efx);
+	efx->mac_op->reconfigure(efx);
+
 	mutex_unlock(&efx->mac_lock);
 }
 
-/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
- * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
- * and efx_mac_work may still be scheduled via NAPI processing until
- * efx_flush_all() is called */
+/* Prevent efx_mac_work() and efx_monitor() from working */
 static void efx_stop_port(struct efx_nic *efx)
 {
 	EFX_LOG(efx, "stop port\n");
@@ -760,11 +843,10 @@ static void efx_fini_port(struct efx_nic *efx)
 	if (!efx->port_initialized)
 		return;
 
-	efx_stats_disable(efx);
 	efx->phy_op->fini(efx);
 	efx->port_initialized = false;
 
-	efx->link_up = false;
+	efx->link_state.up = false;
 	efx_link_status_changed(efx);
 }
 
@@ -772,7 +854,7 @@ static void efx_remove_port(struct efx_nic *efx)
 {
 	EFX_LOG(efx, "destroying port\n");
 
-	falcon_remove_port(efx);
+	efx->type->remove_port(efx);
 }
 
 /**************************************************************************
@@ -824,9 +906,8 @@ static int efx_init_io(struct efx_nic *efx)
 		goto fail2;
 	}
 
-	efx->membase_phys = pci_resource_start(efx->pci_dev,
-					       efx->type->mem_bar);
-	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
+	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
+	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
 	if (rc) {
 		EFX_ERR(efx, "request for memory BAR failed\n");
 		rc = -EIO;
@@ -835,21 +916,20 @@ static int efx_init_io(struct efx_nic *efx)
 	efx->membase = ioremap_nocache(efx->membase_phys,
 				       efx->type->mem_map_size);
 	if (!efx->membase) {
-		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
-			efx->type->mem_bar,
+		EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
 			(unsigned long long)efx->membase_phys,
 			efx->type->mem_map_size);
 		rc = -ENOMEM;
 		goto fail4;
 	}
-	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
-		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+	EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
+		(unsigned long long)efx->membase_phys,
 		efx->type->mem_map_size, efx->membase);
 
 	return 0;
 
  fail4:
-	pci_release_region(efx->pci_dev, efx->type->mem_bar);
+	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
  fail3:
 	efx->membase_phys = 0;
  fail2:
@@ -868,7 +948,7 @@ static void efx_fini_io(struct efx_nic *efx)
 	}
 
 	if (efx->membase_phys) {
-		pci_release_region(efx->pci_dev, efx->type->mem_bar);
+		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 		efx->membase_phys = 0;
 	}
 
@@ -1011,7 +1091,7 @@ static int efx_probe_nic(struct efx_nic *efx)
 	EFX_LOG(efx, "creating NIC\n");
 
 	/* Carry out hardware-type specific initialisation */
-	rc = falcon_probe_nic(efx);
+	rc = efx->type->probe(efx);
 	if (rc)
 		return rc;
 
@@ -1032,7 +1112,7 @@ static void efx_remove_nic(struct efx_nic *efx)
 	EFX_LOG(efx, "destroying NIC\n");
 
 	efx_remove_interrupts(efx);
-	falcon_remove_nic(efx);
+	efx->type->remove(efx);
 }
 
 /**************************************************************************
@@ -1112,12 +1192,31 @@ static void efx_start_all(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
 
-	falcon_enable_interrupts(efx);
-
-	/* Start hardware monitor if we're in RUNNING */
-	if (efx->state == STATE_RUNNING)
+	efx_nic_enable_interrupts(efx);
+
+	/* Switch to event based MCDI completions after enabling interrupts.
+	 * If a reset has been scheduled, then we need to stay in polled mode.
+	 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
+	 * reset_pending [modified from an atomic context], we instead guarantee
+	 * that efx_mcdi_mode_poll() isn't reverted erroneously */
+	efx_mcdi_mode_event(efx);
+	if (efx->reset_pending != RESET_TYPE_NONE)
+		efx_mcdi_mode_poll(efx);
+
+	/* Start the hardware monitor if there is one. Otherwise (we're link
+	 * event driven), we have to poll the PHY because after an event queue
+	 * flush, we could have missed a link state change */
+	if (efx->type->monitor != NULL) {
 		queue_delayed_work(efx->workqueue, &efx->monitor_work,
 				   efx_monitor_interval);
+	} else {
+		mutex_lock(&efx->mac_lock);
+		if (efx->phy_op->poll(efx))
+			efx_link_status_changed(efx);
+		mutex_unlock(&efx->mac_lock);
+	}
+
+	efx->type->start_stats(efx);
 }
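
Editor's note: the MCDI comment above relies on ordering rather than a lock: efx_start_all() switches to event completions and only then re-checks reset_pending, while efx_schedule_reset() sets reset_pending before forcing poll mode, so whichever way the two paths interleave the device ends up polled whenever a reset is pending. The toy model below restates that argument; the variables are a stand-in for the driver state, not its real MCDI interface.

#include <stdio.h>

enum mcdi_mode { MCDI_MODE_POLL, MCDI_MODE_EVENT };

static enum mcdi_mode mode = MCDI_MODE_POLL;
static int reset_pending;		/* 0 models RESET_TYPE_NONE */

/* Models efx_schedule_reset(): flag the reset, then force poll mode. */
static void schedule_reset(void)
{
	reset_pending = 1;
	mode = MCDI_MODE_POLL;
}

/* Models the tail of efx_start_all(): switch to event mode, then
 * re-check the flag and revert if a reset was scheduled meanwhile. */
static void start_all(void)
{
	mode = MCDI_MODE_EVENT;
	if (reset_pending)
		mode = MCDI_MODE_POLL;
}

int main(void)
{
	/* Reset scheduled before (or during) start_all(): stays polled. */
	schedule_reset();
	start_all();
	printf("with reset pending:    mode=%d (expect %d)\n",
	       mode, MCDI_MODE_POLL);

	/* No reset pending: event mode survives. */
	reset_pending = 0;
	start_all();
	printf("without reset pending: mode=%d (expect %d)\n",
	       mode, MCDI_MODE_EVENT);
	return 0;
}

The interesting interleaving, schedule_reset() running between the two statements of start_all(), also ends in poll mode, because the re-check is the last write on that path.
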
 
 /* Flush all delayed work. Should only be called when no more delayed work
@@ -1136,8 +1235,6 @@ static void efx_flush_all(struct efx_nic *efx)
 
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->mac_work);
-	cancel_work_sync(&efx->phy_work);
-
 }
 
 /* Quiesce hardware and software without bringing the link down.
@@ -1155,8 +1252,13 @@ static void efx_stop_all(struct efx_nic *efx)
 	if (!efx->port_enabled)
 		return;
 
+	efx->type->stop_stats(efx);
+
+	/* Switch to MCDI polling on Siena before disabling interrupts */
+	efx_mcdi_mode_poll(efx);
+
 	/* Disable interrupts and wait for ISR to complete */
-	falcon_disable_interrupts(efx);
+	efx_nic_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
 	efx_for_each_channel(channel, efx) {
@@ -1173,15 +1275,9 @@ static void efx_stop_all(struct efx_nic *efx)
 	 * window to lose phy events */
 	efx_stop_port(efx);
 
-	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
+	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
 	efx_flush_all(efx);
 
-	/* Isolate the MAC from the TX and RX engines, so that queue
-	 * flushes will complete in a timely fashion. */
-	falcon_deconfigure_mac_wrapper(efx);
-	msleep(10); /* Let the Rx FIFO drain */
-	falcon_drain_tx_fifo(efx);
-
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
@@ -1201,41 +1297,39 @@ static void efx_remove_all(struct efx_nic *efx)
 	efx_remove_nic(efx);
 }
 
-/* A convinience function to safely flush all the queues */
-void efx_flush_queues(struct efx_nic *efx)
-{
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
-	efx_stop_all(efx);
-
-	efx_fini_channels(efx);
-	efx_init_channels(efx);
-
-	efx_start_all(efx);
-}
-
 /**************************************************************************
  *
  * Interrupt moderation
  *
  **************************************************************************/
 
+static unsigned irq_mod_ticks(int usecs, int resolution)
+{
+	if (usecs <= 0)
+		return 0; /* cannot receive interrupts ahead of time :-) */
+	if (usecs < resolution)
+		return 1; /* never round down to 0 */
+	return usecs / resolution;
+}
+
 /* Set interrupt moderation parameters */
 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 			     bool rx_adaptive)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
+	unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	efx_for_each_tx_queue(tx_queue, efx)
-		tx_queue->channel->irq_moderation = tx_usecs;
+		tx_queue->channel->irq_moderation = tx_ticks;
 
 	efx->irq_rx_adaptive = rx_adaptive;
-	efx->irq_rx_moderation = rx_usecs;
+	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_rx_queue(rx_queue, efx)
-		rx_queue->channel->irq_moderation = rx_usecs;
+		rx_queue->channel->irq_moderation = rx_ticks;
 }
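
Editor's note: irq_mod_ticks() above quantises a requested moderation interval into hardware timer ticks: non-positive values disable moderation, anything below one tick is rounded up to a single tick, and everything else is truncated. A standalone illustration follows; the 5 us resolution is only an example value, not necessarily what EFX_IRQ_MOD_RESOLUTION is defined to.

#include <stdio.h>

/* Same shape as irq_mod_ticks() in the patch above. */
static unsigned irq_mod_ticks(int usecs, int resolution)
{
	if (usecs <= 0)
		return 0;	/* moderation disabled */
	if (usecs < resolution)
		return 1;	/* never round a non-zero request down to 0 */
	return usecs / resolution;
}

int main(void)
{
	const int res = 5;	/* example tick resolution in microseconds */
	int samples[] = { -1, 0, 1, 4, 5, 7, 60 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%3d us -> %u ticks\n",
		       samples[i], irq_mod_ticks(samples[i], res));
	return 0;
}

With res = 5 this prints 0, 0, 1, 1, 1, 1 and 12 ticks; the ethtool get_coalesce hook later in this patch undoes the quantisation by multiplying the stored tick counts back up by the resolution.
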
 
 /**************************************************************************
@@ -1250,10 +1344,10 @@ static void efx_monitor(struct work_struct *data)
 {
 	struct efx_nic *efx = container_of(data, struct efx_nic,
 					   monitor_work.work);
-	int rc;
 
 	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
 		  raw_smp_processor_id());
+	BUG_ON(efx->type->monitor == NULL);
 
 	/* If the mac_lock is already held then it is likely a port
 	 * reconfiguration is already in place, which will likely do
@@ -1262,15 +1356,7 @@ static void efx_monitor(struct work_struct *data)
 		goto out_requeue;
 	if (!efx->port_enabled)
 		goto out_unlock;
-	rc = efx->board_info.monitor(efx);
-	if (rc) {
-		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
-			(rc == -ERANGE) ? "reported fault" : "failed");
-		efx->phy_mode |= PHY_MODE_LOW_POWER;
-		falcon_sim_phy_event(efx);
-	}
-	efx->phy_op->poll(efx);
-	efx->mac_op->poll(efx);
+	efx->type->monitor(efx);
 
 out_unlock:
 	mutex_unlock(&efx->mac_lock);
@@ -1374,6 +1460,12 @@ static int efx_net_open(struct net_device *net_dev)
 		return -EIO;
 	if (efx->phy_mode & PHY_MODE_SPECIAL)
 		return -EBUSY;
+	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
+		return -EIO;
+
+	/* Notify the kernel of the link state polled during driver load,
+	 * before the monitor starts running */
+	efx_link_status_changed(efx);
 
 	efx_start_all(efx);
 	return 0;
@@ -1400,20 +1492,6 @@ static int efx_net_stop(struct net_device *net_dev)
 	return 0;
 }
 
-void efx_stats_disable(struct efx_nic *efx)
-{
-	spin_lock(&efx->stats_lock);
-	++efx->stats_disable_count;
-	spin_unlock(&efx->stats_lock);
-}
-
-void efx_stats_enable(struct efx_nic *efx)
-{
-	spin_lock(&efx->stats_lock);
-	--efx->stats_disable_count;
-	spin_unlock(&efx->stats_lock);
-}
-
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
@@ -1421,17 +1499,9 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct net_device_stats *stats = &net_dev->stats;
 
-	/* Update stats if possible, but do not wait if another thread
-	 * is updating them or if MAC stats fetches are temporarily
-	 * disabled; slightly stale stats are acceptable.
-	 */
-	if (!spin_trylock(&efx->stats_lock))
-		return stats;
-	if (!efx->stats_disable_count) {
-		efx->mac_op->update_stats(efx);
-		falcon_update_nic_stats(efx);
-	}
-	spin_unlock(&efx->stats_lock);
+	spin_lock_bh(&efx->stats_lock);
+	efx->type->update_stats(efx);
+	spin_unlock_bh(&efx->stats_lock);
 
 	stats->rx_packets = mac_stats->rx_packets;
 	stats->tx_packets = mac_stats->tx_packets;
@@ -1490,7 +1560,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
 
 	efx_fini_channels(efx);
+
+	mutex_lock(&efx->mac_lock);
+	/* Reconfigure the MAC before enabling the dma queues so that
+	 * the RX buffers don't overflow */
 	net_dev->mtu = new_mtu;
+	efx->mac_op->reconfigure(efx);
+	mutex_unlock(&efx->mac_lock);
+
 	efx_init_channels(efx);
 
 	efx_start_all(efx);
@@ -1514,7 +1591,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
 	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
 
 	/* Reconfigure the MAC */
-	efx_reconfigure_port(efx);
+	mutex_lock(&efx->mac_lock);
+	efx->mac_op->reconfigure(efx);
+	mutex_unlock(&efx->mac_lock);
 
 	return 0;
 }
@@ -1525,16 +1604,14 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct dev_mc_list *mc_list = net_dev->mc_list;
 	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
-	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
-	bool changed = (efx->promiscuous != promiscuous);
 	u32 crc;
 	int bit;
 	int i;
 
-	efx->promiscuous = promiscuous;
+	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
 
 	/* Build multicast hash table */
-	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
+	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
 		memset(mc_hash, 0xff, sizeof(*mc_hash));
 	} else {
 		memset(mc_hash, 0x00, sizeof(*mc_hash));
@@ -1544,17 +1621,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 			set_bit_le(bit, mc_hash->byte);
 			mc_list = mc_list->next;
 		}
-	}
-
-	if (!efx->port_enabled)
-		/* Delay pushing settings until efx_start_port() */
-		return;
 
-	if (changed)
-		queue_work(efx->workqueue, &efx->phy_work);
+		/* Broadcast packets go through the multicast hash filter.
+		 * ether_crc_le() of the broadcast address is 0xbe2612ff
+		 * so we always add bit 0xff to the mask.
+		 */
+		set_bit_le(0xff, mc_hash->byte);
+	}
 
-	/* Create and activate new global multicast hash table */
-	falcon_set_multicast_hash(efx);
+	if (efx->port_enabled)
+		queue_work(efx->workqueue, &efx->mac_work);
+	/* Otherwise efx_start_port() will do this */
 }
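
Editor's note: the broadcast-filter comment above leans on the fact that ether_crc_le() of ff:ff:ff:ff:ff:ff is 0xbe2612ff, whose low byte is 0xff, hence bit 0xff is always set in the 256-entry hash. The userspace re-implementation below of the little-endian Ethernet CRC (the same bit-reversed CRC-32 the kernel helper computes, written out by hand here rather than calling the kernel) lets you check that claim.

#include <stdio.h>

/* Bit-reversed CRC-32 (poly 0xedb88320), initial value ~0, no final
 * inversion - the computation behind the kernel's ether_crc_le(). */
static unsigned int crc32_le_bytes(const unsigned char *data, int len)
{
	unsigned int crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		unsigned char octet = data[i];
		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned int crc = crc32_le_bytes(bcast, 6);

	/* Expect 0xbe2612ff; the low 8 bits select the hash bucket (0xff),
	 * matching the driver comment above. */
	printf("crc = %#x, hash bit = %#x\n", crc, crc & 0xff);
	return 0;
}
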
 
 static const struct net_device_ops efx_netdev_ops = {
@@ -1683,21 +1760,18 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 
 /* Tears down the entire software state and most of the hardware state
  * before reset.  */
-void efx_reset_down(struct efx_nic *efx, enum reset_type method,
-		    struct ethtool_cmd *ecmd)
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	efx_stats_disable(efx);
 	efx_stop_all(efx);
 	mutex_lock(&efx->mac_lock);
 	mutex_lock(&efx->spi_lock);
 
-	efx->phy_op->get_settings(efx, ecmd);
-
 	efx_fini_channels(efx);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
 		efx->phy_op->fini(efx);
+	efx->type->fini(efx);
 }
 
 /* This function will always ensure that the locks acquired in
@@ -1705,79 +1779,67 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method,
  * that we were unable to reinitialise the hardware, and the
  * driver should be disabled. If ok is false, then the rx and tx
  * engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, enum reset_type method,
-		 struct ethtool_cmd *ecmd, bool ok)
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 {
 	int rc;
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	rc = falcon_init_nic(efx);
+	rc = efx->type->init(efx);
 	if (rc) {
 		EFX_ERR(efx, "failed to initialise NIC\n");
-		ok = false;
+		goto fail;
 	}
 
+	if (!ok)
+		goto fail;
+
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
-		if (ok) {
-			rc = efx->phy_op->init(efx);
-			if (rc)
-				ok = false;
-		}
-		if (!ok)
-			efx->port_initialized = false;
+		rc = efx->phy_op->init(efx);
+		if (rc)
+			goto fail;
+		if (efx->phy_op->reconfigure(efx))
+			EFX_ERR(efx, "could not restore PHY settings\n");
 	}
 
-	if (ok) {
-		efx_init_channels(efx);
+	efx->mac_op->reconfigure(efx);
 
-		if (efx->phy_op->set_settings(efx, ecmd))
-			EFX_ERR(efx, "could not restore PHY settings\n");
-	}
+	efx_init_channels(efx);
+
+	mutex_unlock(&efx->spi_lock);
+	mutex_unlock(&efx->mac_lock);
+
+	efx_start_all(efx);
+
+	return 0;
+
+fail:
+	efx->port_initialized = false;
 
 	mutex_unlock(&efx->spi_lock);
 	mutex_unlock(&efx->mac_lock);
 
-	if (ok) {
-		efx_start_all(efx);
-		efx_stats_enable(efx);
-	}
 	return rc;
 }
 
-/* Reset the NIC as transparently as possible. Do not reset the PHY
- * Note that the reset may fail, in which case the card will be left
- * in a most-probably-unusable state.
+/* Reset the NIC using the specified method.  Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
  *
- * This function will sleep.  You cannot reset from within an atomic
- * state; use efx_schedule_reset() instead.
- *
- * Grabs the rtnl_lock.
+ * Caller must hold the rtnl_lock.
  */
-static int efx_reset(struct efx_nic *efx)
+int efx_reset(struct efx_nic *efx, enum reset_type method)
 {
-	struct ethtool_cmd ecmd;
-	enum reset_type method = efx->reset_pending;
-	int rc = 0;
+	int rc, rc2;
+	bool disabled;
 
-	/* Serialise with kernel interfaces */
-	rtnl_lock();
+	EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
 
-	/* If we're not RUNNING then don't reset. Leave the reset_pending
-	 * flag set so that efx_pci_probe_main will be retried */
-	if (efx->state != STATE_RUNNING) {
-		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
-		goto out_unlock;
-	}
+	efx_reset_down(efx, method);
 
-	EFX_INFO(efx, "resetting (%d)\n", method);
-
-	efx_reset_down(efx, method, &ecmd);
-
-	rc = falcon_reset_hw(efx, method);
+	rc = efx->type->reset(efx, method);
 	if (rc) {
 		EFX_ERR(efx, "failed to reset hardware\n");
-		goto out_disable;
+		goto out;
 	}
 
 	/* Allow resets to be rescheduled. */
@@ -1789,25 +1851,22 @@ static int efx_reset(struct efx_nic *efx)
 	 * can respond to requests. */
 	pci_set_master(efx->pci_dev);
 
+out:
 	/* Leave device stopped if necessary */
-	if (method == RESET_TYPE_DISABLE) {
-		efx_reset_up(efx, method, &ecmd, false);
-		rc = -EIO;
-	} else {
-		rc = efx_reset_up(efx, method, &ecmd, true);
+	disabled = rc || method == RESET_TYPE_DISABLE;
+	rc2 = efx_reset_up(efx, method, !disabled);
+	if (rc2) {
+		disabled = true;
+		if (!rc)
+			rc = rc2;
 	}
 
-out_disable:
-	if (rc) {
+	if (disabled) {
 		EFX_ERR(efx, "has been disabled\n");
 		efx->state = STATE_DISABLED;
-		dev_close(efx->net_dev);
 	} else {
 		EFX_LOG(efx, "reset complete\n");
 	}
-
-out_unlock:
-	rtnl_unlock();
 	return rc;
 }
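
Editor's note: the rewritten efx_reset() above folds two possible failures into one result, the hardware reset itself and the subsequent efx_reset_up(), keeping the first error code and marking the NIC disabled if either step failed or RESET_TYPE_DISABLE was requested. A condensed sketch of that error-combination logic follows, with invented rc values and method constants rather than driver calls.

#include <stdio.h>
#include <stdbool.h>

#define RESET_TYPE_ALL		0	/* illustrative values only */
#define RESET_TYPE_DISABLE	1

/* Mirrors the tail of efx_reset(): prefer the first error, and derive
 * "leave the device disabled" from both outcomes plus the method. */
static int combine_reset_result(int rc_reset, int rc_up, int method,
				bool *disabled)
{
	int rc = rc_reset;

	*disabled = rc != 0 || method == RESET_TYPE_DISABLE;
	if (rc_up) {
		*disabled = true;
		if (!rc)
			rc = rc_up;
	}
	return rc;
}

int main(void)
{
	bool disabled;
	int rc;

	rc = combine_reset_result(0, 0, RESET_TYPE_ALL, &disabled);
	printf("clean reset:       rc=%d disabled=%d\n", rc, disabled);

	rc = combine_reset_result(-5, -12, RESET_TYPE_ALL, &disabled);
	printf("both steps failed: rc=%d disabled=%d\n", rc, disabled);

	rc = combine_reset_result(0, 0, RESET_TYPE_DISABLE, &disabled);
	printf("requested disable: rc=%d disabled=%d\n", rc, disabled);
	return 0;
}
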
 
@@ -1816,9 +1875,19 @@ out_unlock:
  */
 static void efx_reset_work(struct work_struct *data)
 {
-	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
+	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+
+	/* If we're not RUNNING then don't reset. Leave the reset_pending
+	 * flag set so that efx_pci_probe_main will be retried */
+	if (efx->state != STATE_RUNNING) {
+		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
+		return;
+	}
 
-	efx_reset(nic);
+	rtnl_lock();
+	if (efx_reset(efx, efx->reset_pending))
+		dev_close(efx->net_dev);
+	rtnl_unlock();
 }
 
 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
@@ -1843,18 +1912,24 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	case RESET_TYPE_TX_SKIP:
 		method = RESET_TYPE_INVISIBLE;
 		break;
+	case RESET_TYPE_MC_FAILURE:
 	default:
 		method = RESET_TYPE_ALL;
 		break;
 	}
 
 	if (method != type)
-		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
+		EFX_LOG(efx, "scheduling %s reset for %s\n",
+			RESET_TYPE(method), RESET_TYPE(type));
 	else
-		EFX_LOG(efx, "scheduling reset (%d)\n", method);
+		EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
 
 	efx->reset_pending = method;
 
+	/* efx_process_channel() will no longer read events once a
+	 * reset is scheduled. So switch back to polled MCDI completions. */
+	efx_mcdi_mode_poll(efx);
+
 	queue_work(reset_workqueue, &efx->reset_work);
 }
 
@@ -1867,15 +1942,19 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 /* PCI device ID table */
 static struct pci_device_id efx_pci_table[] __devinitdata = {
 	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
-	 .driver_data = (unsigned long) &falcon_a_nic_type},
+	 .driver_data = (unsigned long) &falcon_a1_nic_type},
 	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
-	 .driver_data = (unsigned long) &falcon_b_nic_type},
+	 .driver_data = (unsigned long) &falcon_b0_nic_type},
+	{PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
+	 .driver_data = (unsigned long) &siena_a0_nic_type},
+	{PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
+	 .driver_data = (unsigned long) &siena_a0_nic_type},
 	{0}			/* end of list */
 };
 
 /**************************************************************************
  *
- * Dummy PHY/MAC/Board operations
+ * Dummy PHY/MAC operations
  *
  * Can be used for some unimplemented operations
  * Needed so all function pointers are valid and do not have to be tested
@@ -1887,29 +1966,19 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
 	return 0;
 }
 void efx_port_dummy_op_void(struct efx_nic *efx) {}
-void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
-
-static struct efx_mac_operations efx_dummy_mac_operations = {
-	.reconfigure	= efx_port_dummy_op_void,
-	.poll		= efx_port_dummy_op_void,
-	.irq		= efx_port_dummy_op_void,
-};
+void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+}
+bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+	return false;
+}
 
 static struct efx_phy_operations efx_dummy_phy_operations = {
 	.init		 = efx_port_dummy_op_int,
-	.reconfigure	 = efx_port_dummy_op_void,
-	.poll		 = efx_port_dummy_op_void,
+	.reconfigure	 = efx_port_dummy_op_int,
+	.poll		 = efx_port_dummy_op_poll,
 	.fini		 = efx_port_dummy_op_void,
-	.clear_interrupt = efx_port_dummy_op_void,
-};
-
-static struct efx_board efx_dummy_board_info = {
-	.init		= efx_port_dummy_op_int,
-	.init_leds	= efx_port_dummy_op_void,
-	.set_id_led	= efx_port_dummy_op_blink,
-	.monitor	= efx_port_dummy_op_int,
-	.blink		= efx_port_dummy_op_blink,
-	.fini		= efx_port_dummy_op_void,
 };
 
 /**************************************************************************
@@ -1932,26 +2001,26 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 	/* Initialise common structures */
 	memset(efx, 0, sizeof(*efx));
 	spin_lock_init(&efx->biu_lock);
-	spin_lock_init(&efx->phy_lock);
+	mutex_init(&efx->mdio_lock);
 	mutex_init(&efx->spi_lock);
+#ifdef CONFIG_SFC_MTD
+	INIT_LIST_HEAD(&efx->mtd_list);
+#endif
 	INIT_WORK(&efx->reset_work, efx_reset_work);
 	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
 	efx->pci_dev = pci_dev;
 	efx->state = STATE_INIT;
 	efx->reset_pending = RESET_TYPE_NONE;
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
-	efx->board_info = efx_dummy_board_info;
 
 	efx->net_dev = net_dev;
 	efx->rx_checksum_enabled = true;
 	spin_lock_init(&efx->netif_stop_lock);
 	spin_lock_init(&efx->stats_lock);
-	efx->stats_disable_count = 1;
 	mutex_init(&efx->mac_lock);
-	efx->mac_op = &efx_dummy_mac_operations;
+	efx->mac_op = type->default_mac_ops;
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
-	INIT_WORK(&efx->phy_work, efx_phy_work);
 	INIT_WORK(&efx->mac_work, efx_mac_work);
 	atomic_set(&efx->netif_stop_count, 1);
 
@@ -1981,17 +2050,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->type = type;
 
-	/* Sanity-check NIC type */
-	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
-			    (efx->type->txd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
-			    (efx->type->rxd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->evq_size &
-			    (efx->type->evq_size - 1));
 	/* As close as we can get to guaranteeing that we don't overflow */
-	EFX_BUG_ON_PARANOID(efx->type->evq_size <
-			    (efx->type->txd_ring_mask + 1 +
-			     efx->type->rxd_ring_mask + 1));
+	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
+
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
@@ -2027,19 +2088,10 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
-	/* Skip everything if we never obtained a valid membase */
-	if (!efx->membase)
-		return;
-
+	efx_nic_fini_interrupt(efx);
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
-
-	/* Shutdown the board, then the NIC and board state */
-	efx->board_info.fini(efx);
-	falcon_fini_interrupt(efx);
-
+	efx->type->fini(efx);
 	efx_fini_napi(efx);
 	efx_remove_all(efx);
 }
@@ -2063,9 +2115,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 	/* Allow any queued efx_resets() to complete */
 	rtnl_unlock();
 
-	if (efx->membase == NULL)
-		goto out;
-
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
@@ -2078,7 +2127,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
 	efx_pci_remove_main(efx);
 
-out:
 	efx_fini_io(efx);
 	EFX_LOG(efx, "shutdown successful\n");
 
@@ -2103,39 +2151,31 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 	if (rc)
 		goto fail2;
 
-	/* Initialise the board */
-	rc = efx->board_info.init(efx);
-	if (rc) {
-		EFX_ERR(efx, "failed to initialise board\n");
-		goto fail3;
-	}
-
-	rc = falcon_init_nic(efx);
+	rc = efx->type->init(efx);
 	if (rc) {
 		EFX_ERR(efx, "failed to initialise NIC\n");
-		goto fail4;
+		goto fail3;
 	}
 
 	rc = efx_init_port(efx);
 	if (rc) {
 		EFX_ERR(efx, "failed to initialise port\n");
-		goto fail5;
+		goto fail4;
 	}
 
 	efx_init_channels(efx);
 
-	rc = falcon_init_interrupt(efx);
+	rc = efx_nic_init_interrupt(efx);
 	if (rc)
-		goto fail6;
+		goto fail5;
 
 	return 0;
 
- fail6:
+ fail5:
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
- fail5:
  fail4:
-	efx->board_info.fini(efx);
+	efx->type->fini(efx);
  fail3:
 	efx_fini_napi(efx);
  fail2:
@@ -2165,9 +2205,11 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	net_dev = alloc_etherdev(sizeof(*efx));
 	if (!net_dev)
 		return -ENOMEM;
-	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
+	net_dev->features |= (type->offload_features | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO |
 			      NETIF_F_GRO);
+	if (type->offload_features & NETIF_F_V6_CSUM)
+		net_dev->features |= NETIF_F_TSO6;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
 				   NETIF_F_HIGHDMA | NETIF_F_TSO);
@@ -2219,18 +2261,19 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 		goto fail4;
 	}
 
-	/* Switch to the running state before we expose the device to
-	 * the OS.  This is to ensure that the initial gathering of
-	 * MAC stats succeeds. */
+	/* Switch to the running state before we expose the device to the OS,
+	 * so that dev_open()|efx_start_all() will actually start the device */
 	efx->state = STATE_RUNNING;
 
-	efx_mtd_probe(efx); /* allowed to fail */
-
 	rc = efx_register_netdev(efx);
 	if (rc)
 		goto fail5;
 
 	EFX_LOG(efx, "initialisation successful\n");
+
+	rtnl_lock();
+	efx_mtd_probe(efx); /* allowed to fail */
+	rtnl_unlock();
 	return 0;
 
  fail5:
@@ -2246,11 +2289,107 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	return rc;
 }
 
+static int efx_pm_freeze(struct device *dev)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+	efx->state = STATE_FINI;
+
+	netif_device_detach(efx->net_dev);
+
+	efx_stop_all(efx);
+	efx_fini_channels(efx);
+
+	return 0;
+}
+
+static int efx_pm_thaw(struct device *dev)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+	efx->state = STATE_INIT;
+
+	efx_init_channels(efx);
+
+	mutex_lock(&efx->mac_lock);
+	efx->phy_op->reconfigure(efx);
+	mutex_unlock(&efx->mac_lock);
+
+	efx_start_all(efx);
+
+	netif_device_attach(efx->net_dev);
+
+	efx->state = STATE_RUNNING;
+
+	efx->type->resume_wol(efx);
+
+	return 0;
+}
+
+static int efx_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+	efx->type->fini(efx);
+
+	efx->reset_pending = RESET_TYPE_NONE;
+
+	pci_save_state(pci_dev);
+	return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int efx_pm_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct efx_nic *efx = pci_get_drvdata(pci_dev);
+	int rc;
+
+	rc = pci_set_power_state(pci_dev, PCI_D0);
+	if (rc)
+		return rc;
+	pci_restore_state(pci_dev);
+	rc = pci_enable_device(pci_dev);
+	if (rc)
+		return rc;
+	pci_set_master(efx->pci_dev);
+	rc = efx->type->reset(efx, RESET_TYPE_ALL);
+	if (rc)
+		return rc;
+	rc = efx->type->init(efx);
+	if (rc)
+		return rc;
+	efx_pm_thaw(dev);
+	return 0;
+}
+
+static int efx_pm_suspend(struct device *dev)
+{
+	int rc;
+
+	efx_pm_freeze(dev);
+	rc = efx_pm_poweroff(dev);
+	if (rc)
+		efx_pm_resume(dev);
+	return rc;
+}
+
+static struct dev_pm_ops efx_pm_ops = {
+	.suspend	= efx_pm_suspend,
+	.resume		= efx_pm_resume,
+	.freeze		= efx_pm_freeze,
+	.thaw		= efx_pm_thaw,
+	.poweroff	= efx_pm_poweroff,
+	.restore	= efx_pm_resume,
+};
+
 static struct pci_driver efx_pci_driver = {
 	.name		= EFX_DRIVER_NAME,
 	.id_table	= efx_pci_table,
 	.probe		= efx_pci_probe,
 	.remove		= efx_pci_remove,
+	.driver.pm	= &efx_pm_ops,
 };
 
 /**************************************************************************
@@ -2314,8 +2453,8 @@ static void __exit efx_exit_module(void)
 module_init(efx_init_module);
 module_exit(efx_exit_module);
 
-MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
-	      "Solarflare Communications");
+MODULE_AUTHOR("Solarflare Communications and "
+	      "Michael Brown <mbrown@fensystems.co.uk>");
 MODULE_DESCRIPTION("Solarflare Communications network driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62f4929..a615ac051530 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -18,35 +18,64 @@
 #define FALCON_A_P_DEVID	0x0703
 #define FALCON_A_S_DEVID        0x6703
 #define FALCON_B_P_DEVID        0x0710
+#define BETHPAGE_A_P_DEVID      0x0803
+#define SIENA_A_P_DEVID         0x0813
+
+/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+#define EFX_MEM_BAR 2
 
 /* TX */
-extern netdev_tx_t efx_xmit(struct efx_nic *efx,
-				  struct efx_tx_queue *tx_queue,
-				  struct sk_buff *skb);
+extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
+extern netdev_tx_t
+efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
+extern netdev_tx_t
+efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_stop_queue(struct efx_nic *efx);
 extern void efx_wake_queue(struct efx_nic *efx);
+#define EFX_TXQ_SIZE 1024
+#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
 /* RX */
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_rx_strategy(struct efx_channel *channel);
+extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+extern void efx_rx_work(struct work_struct *data);
+extern void __efx_rx_packet(struct efx_channel *channel,
+			    struct efx_rx_buffer *rx_buf, bool checksummed);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+#define EFX_RXQ_SIZE 1024
+#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
-extern void efx_flush_queues(struct efx_nic *efx);
+#define EFX_EVQ_SIZE 4096
+#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
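
Editor's note: the new fixed EFX_TXQ/RXQ/EVQ sizes are powers of two so that the companion *_MASK values can replace modulo arithmetic when walking the descriptor and event rings (efx_init_struct() in this same patch asserts EFX_EVQ_SIZE >= EFX_TXQ_SIZE + EFX_RXQ_SIZE with a BUILD_BUG_ON). A small illustration of the mask-based wrap-around these defines enable, using a demo ring rather than the driver's structures:

#include <stdio.h>

#define DEMO_RING_SIZE 1024			/* power of two, like EFX_TXQ_SIZE */
#define DEMO_RING_MASK (DEMO_RING_SIZE - 1)

int main(void)
{
	unsigned int write_count = 0;
	int i;

	/* Producer counters only ever grow; the ring slot is recovered by
	 * masking, which equals "% DEMO_RING_SIZE" but is cheaper and stays
	 * consistent across the unsigned counter wrap because the ring size
	 * divides 2^32 exactly. */
	for (i = 0; i < 3; i++) {
		unsigned int slot = write_count & DEMO_RING_MASK;
		printf("count=%u -> slot=%u\n", write_count, slot);
		write_count += 1023;	/* arbitrary stride for the demo */
	}
	return 0;
}
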
 
 /* Ports */
-extern void efx_stats_disable(struct efx_nic *efx);
-extern void efx_stats_enable(struct efx_nic *efx);
-extern void efx_reconfigure_port(struct efx_nic *efx);
-extern void __efx_reconfigure_port(struct efx_nic *efx);
+extern int efx_reconfigure_port(struct efx_nic *efx);
+extern int __efx_reconfigure_port(struct efx_nic *efx);
+
+/* Ethtool support */
+extern int efx_ethtool_get_settings(struct net_device *net_dev,
+				    struct ethtool_cmd *ecmd);
+extern int efx_ethtool_set_settings(struct net_device *net_dev,
+				    struct ethtool_cmd *ecmd);
+extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Reset handling */
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
-			   struct ethtool_cmd *ecmd);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
-			struct ethtool_cmd *ecmd, bool ok);
+extern int efx_reset(struct efx_nic *efx, enum reset_type method);
+extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
 
 /* Global */
 extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
@@ -60,7 +89,9 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *);
 /* Dummy PHY ops for PHY drivers */
 extern int efx_port_dummy_op_int(struct efx_nic *efx);
 extern void efx_port_dummy_op_void(struct efx_nic *efx);
-extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink);
+extern void
+efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+extern bool efx_port_dummy_op_poll(struct efx_nic *efx);
 
 /* MTD */
 #ifdef CONFIG_SFC_MTD
@@ -84,4 +115,8 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
 	napi_schedule(&channel->napi_str);
 }
 
+extern void efx_link_status_changed(struct efx_nic *efx);
+extern void efx_link_set_advertising(struct efx_nic *efx, u32);
+extern void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type);
+
 #endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 60cbc6e1e66b..384cfe3b1be1 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
+ * Copyright 2007-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -13,44 +13,101 @@
 /**
  * enum efx_loopback_mode - loopback modes
  * @LOOPBACK_NONE: no loopback
- * @LOOPBACK_GMAC: loopback within GMAC at unspecified level
- * @LOOPBACK_XGMII: loopback within XMAC at XGMII level
- * @LOOPBACK_XGXS: loopback within XMAC at XGXS level
- * @LOOPBACK_XAUI: loopback within XMAC at XAUI level
+ * @LOOPBACK_DATA: data path loopback
+ * @LOOPBACK_GMAC: loopback within GMAC
+ * @LOOPBACK_XGMII: loopback after XMAC
+ * @LOOPBACK_XGXS: loopback within BPX after XGXS
+ * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
+ * @LOOPBACK_GMII: loopback within BPX after GMAC
+ * @LOOPBACK_SGMII: loopback within BPX within SGMII
+ * @LOOPBACK_XGBR: loopback within BPX within XGBR
+ * @LOOPBACK_XFI: loopback within BPX before XFI serdes
+ * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
+ * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
+ * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
+ * @LOOPBACK_XFI_FAR: loopback after XFI serdes
  * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
  * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
  * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
  * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
- * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
+ * @LOOPBACK_XPORT: cross port loopback
+ * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
+ * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
+ * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
+ * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
+ * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
+ * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
+ * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
+ * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
  */
-/* Please keep in order and up-to-date w.r.t the following two #defines */
+/* Please keep up-to-date w.r.t the following two #defines */
 enum efx_loopback_mode {
 	LOOPBACK_NONE = 0,
-	LOOPBACK_GMAC = 1,
-	LOOPBACK_XGMII = 2,
-	LOOPBACK_XGXS = 3,
-	LOOPBACK_XAUI = 4,
-	LOOPBACK_GPHY = 5,
-	LOOPBACK_PHYXS = 6,
-	LOOPBACK_PCS = 7,
-	LOOPBACK_PMAPMD = 8,
-	LOOPBACK_NETWORK = 9,
+	LOOPBACK_DATA = 1,
+	LOOPBACK_GMAC = 2,
+	LOOPBACK_XGMII = 3,
+	LOOPBACK_XGXS = 4,
+	LOOPBACK_XAUI = 5,
+	LOOPBACK_GMII = 6,
+	LOOPBACK_SGMII = 7,
+	LOOPBACK_XGBR = 8,
+	LOOPBACK_XFI = 9,
+	LOOPBACK_XAUI_FAR = 10,
+	LOOPBACK_GMII_FAR = 11,
+	LOOPBACK_SGMII_FAR = 12,
+	LOOPBACK_XFI_FAR = 13,
+	LOOPBACK_GPHY = 14,
+	LOOPBACK_PHYXS = 15,
+	LOOPBACK_PCS = 16,
+	LOOPBACK_PMAPMD = 17,
+	LOOPBACK_XPORT = 18,
+	LOOPBACK_XGMII_WS = 19,
+	LOOPBACK_XAUI_WS = 20,
+	LOOPBACK_XAUI_WS_FAR = 21,
+	LOOPBACK_XAUI_WS_NEAR = 22,
+	LOOPBACK_GMII_WS = 23,
+	LOOPBACK_XFI_WS = 24,
+	LOOPBACK_XFI_WS_FAR = 25,
+	LOOPBACK_PHYXS_WS = 26,
 	LOOPBACK_MAX
 };
-
 #define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
 
-extern const char *efx_loopback_mode_names[];
-#define LOOPBACK_MODE_NAME(mode)			\
-	STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
-#define LOOPBACK_MODE(efx)				\
-	LOOPBACK_MODE_NAME(efx->loopback_mode)
-
 /* These loopbacks occur within the controller */
-#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_GMAC) |     \
-			    (1 << LOOPBACK_XGMII)|     \
-			    (1 << LOOPBACK_XGXS) |     \
-			    (1 << LOOPBACK_XAUI))
+#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) |		\
+			    (1 << LOOPBACK_GMAC) |		\
+			    (1 << LOOPBACK_XGMII)|		\
+			    (1 << LOOPBACK_XGXS) |		\
+			    (1 << LOOPBACK_XAUI) |		\
+			    (1 << LOOPBACK_GMII) |		\
+			    (1 << LOOPBACK_SGMII) |		\
+			    (1 << LOOPBACK_SGMII) |		\
+			    (1 << LOOPBACK_XGBR) |		\
+			    (1 << LOOPBACK_XFI) |		\
+			    (1 << LOOPBACK_XAUI_FAR) |		\
+			    (1 << LOOPBACK_GMII_FAR) |		\
+			    (1 << LOOPBACK_SGMII_FAR) |		\
+			    (1 << LOOPBACK_XFI_FAR) |		\
+			    (1 << LOOPBACK_XGMII_WS) |		\
+			    (1 << LOOPBACK_XAUI_WS) |		\
+			    (1 << LOOPBACK_XAUI_WS_FAR) |	\
+			    (1 << LOOPBACK_XAUI_WS_NEAR) |	\
+			    (1 << LOOPBACK_GMII_WS) |		\
+			    (1 << LOOPBACK_XFI_WS) |		\
+			    (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) |		\
+		      (1 << LOOPBACK_XAUI_WS) |			\
+		      (1 << LOOPBACK_XAUI_WS_FAR) |		\
+		      (1 << LOOPBACK_XAUI_WS_NEAR) |		\
+		      (1 << LOOPBACK_GMII_WS) |			\
+		      (1 << LOOPBACK_XFI_WS) |			\
+		      (1 << LOOPBACK_XFI_WS_FAR) |		\
+		      (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx)					\
+	((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL &			\
+	 ~(1 << LOOPBACK_NONE))
 
 #define LOOPBACK_MASK(_efx)			\
 	(1 << (_efx)->loopback_mode)
@@ -58,6 +115,9 @@ extern const char *efx_loopback_mode_names[];
 #define LOOPBACK_INTERNAL(_efx)				\
 	(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
 
+#define LOOPBACK_EXTERNAL(_efx)				\
+	(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
 #define LOOPBACK_CHANGED(_from, _to, _mask)				\
 	(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
 
@@ -84,6 +144,7 @@ extern const char *efx_loopback_mode_names[];
  * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
  * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
  * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
  */
 enum reset_type {
 	RESET_TYPE_NONE = -1,
@@ -98,6 +159,7 @@ enum reset_type {
 	RESET_TYPE_RX_DESC_FETCH,
 	RESET_TYPE_TX_DESC_FETCH,
 	RESET_TYPE_TX_SKIP,
+	RESET_TYPE_MC_FAILURE,
 	RESET_TYPE_MAX,
 };
 
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 45018f283ffa..6c0bbed8c477 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -10,30 +10,15 @@
 
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
-#include <linux/mdio.h>
 #include <linux/rtnetlink.h>
 #include "net_driver.h"
 #include "workarounds.h"
 #include "selftest.h"
 #include "efx.h"
-#include "ethtool.h"
-#include "falcon.h"
+#include "nic.h"
 #include "spi.h"
 #include "mdio_10g.h"
 
-const char *efx_loopback_mode_names[] = {
-	[LOOPBACK_NONE]		= "NONE",
-	[LOOPBACK_GMAC]		= "GMAC",
-	[LOOPBACK_XGMII]	= "XGMII",
-	[LOOPBACK_XGXS]		= "XGXS",
-	[LOOPBACK_XAUI] 	= "XAUI",
-	[LOOPBACK_GPHY]		= "GPHY",
-	[LOOPBACK_PHYXS]	= "PHYXS",
-	[LOOPBACK_PCS]	 	= "PCS",
-	[LOOPBACK_PMAPMD]	= "PMA/PMD",
-	[LOOPBACK_NETWORK]	= "NETWORK",
-};
-
 struct ethtool_string {
 	char name[ETH_GSTRING_LEN];
 };
@@ -167,6 +152,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
 	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
 	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
 	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
 	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
 };
 
@@ -187,13 +173,15 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	efx->board_info.blink(efx, 1);
-	set_current_state(TASK_INTERRUPTIBLE);
-	if (count)
-		schedule_timeout(count * HZ);
-	else
-		schedule();
-	efx->board_info.blink(efx, 0);
+	do {
+		efx->type->set_id_led(efx, EFX_LED_ON);
+		schedule_timeout_interruptible(HZ / 2);
+
+		efx->type->set_id_led(efx, EFX_LED_OFF);
+		schedule_timeout_interruptible(HZ / 2);
+	} while (!signal_pending(current) && --count != 0);
+
+	efx->type->set_id_led(efx, EFX_LED_DEFAULT);
 	return 0;
 }
 
@@ -202,6 +190,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
 			     struct ethtool_cmd *ecmd)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_link_state *link_state = &efx->link_state;
 
 	mutex_lock(&efx->mac_lock);
 	efx->phy_op->get_settings(efx, ecmd);
@@ -209,6 +198,13 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
 
 	/* Falcon GMAC does not support 1000Mbps HD */
 	ecmd->supported &= ~SUPPORTED_1000baseT_Half;
+	/* Both MACs support pause frames (bidirectional and respond-only) */
+	ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+	if (LOOPBACK_INTERNAL(efx)) {
+		ecmd->speed = link_state->speed;
+		ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+	}
 
 	return 0;
 }
@@ -230,9 +226,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
 	mutex_lock(&efx->mac_lock);
 	rc = efx->phy_op->set_settings(efx, ecmd);
 	mutex_unlock(&efx->mac_lock);
-	if (!rc)
-		efx_reconfigure_port(efx);
-
 	return rc;
 }
 
@@ -243,6 +236,9 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
 
 	strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
 	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+		siena_print_fwver(efx, info->fw_version,
+				  sizeof(info->fw_version));
 	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
 }
 
@@ -289,7 +285,7 @@ static void efx_fill_test(unsigned int test_index,
 #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
 #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
 #define EFX_LOOPBACK_NAME(_mode, _counter)			\
-	"loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)
+	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
 
 /**
  * efx_fill_loopback_test - fill in a block of loopback self-test entries
@@ -372,9 +368,21 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
 	efx_fill_test(n++, strings, data, &tests->registers,
 		      "core", 0, "registers", NULL);
 
-	for (i = 0; i < efx->phy_op->num_tests; i++)
-		efx_fill_test(n++, strings, data, &tests->phy[i],
-			      "phy", 0, efx->phy_op->test_names[i], NULL);
+	if (efx->phy_op->run_tests != NULL) {
+		EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+		for (i = 0; true; ++i) {
+			const char *name;
+
+			EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+			name = efx->phy_op->test_name(efx, i);
+			if (name == NULL)
+				break;
+
+			efx_fill_test(n++, strings, data, &tests->phy[i],
+				      "phy", 0, name, NULL);
+		}
+	}
 
 	/* Loopback tests */
 	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
@@ -463,6 +471,36 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 	}
 }
 
+static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
+{
+	struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
+	unsigned long features;
+
+	features = NETIF_F_TSO;
+	if (efx->type->offload_features & NETIF_F_V6_CSUM)
+		features |= NETIF_F_TSO6;
+
+	if (enable)
+		net_dev->features |= features;
+	else
+		net_dev->features &= ~features;
+
+	return 0;
+}
+
+static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+
+	if (enable)
+		net_dev->features |= features;
+	else
+		net_dev->features &= ~features;
+
+	return 0;
+}
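
Editor's note: efx_ethtool_set_tso() and efx_ethtool_set_tx_csum() above apply the same pattern: build a feature mask from what the NIC type advertises (adding the IPv6 variants only when NETIF_F_V6_CSUM is present) and then set or clear that whole mask on net_dev->features. A minimal model of that capability-gated toggle follows; the flag values are invented for the sketch and are not the kernel's NETIF_F_* constants.

#include <stdio.h>

/* Invented stand-ins for NETIF_F_* bits, for illustration only. */
#define F_TSO		0x01
#define F_TSO6		0x02
#define F_V6_CSUM	0x04

struct fake_dev {
	unsigned long features;		/* currently enabled */
	unsigned long hw_offloads;	/* what the hardware supports */
};

static void set_tso(struct fake_dev *dev, int enable)
{
	unsigned long mask = F_TSO;

	if (dev->hw_offloads & F_V6_CSUM)
		mask |= F_TSO6;	/* only offer TSO6 where IPv6 csum exists */

	if (enable)
		dev->features |= mask;
	else
		dev->features &= ~mask;
}

int main(void)
{
	struct fake_dev siena = { 0, F_V6_CSUM };
	struct fake_dev falcon = { 0, 0 };

	set_tso(&siena, 1);
	set_tso(&falcon, 1);
	printf("siena features  = %#lx\n", siena.features);	/* 0x3 */
	printf("falcon features = %#lx\n", falcon.features);	/* 0x1 */
	return 0;
}
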
+
 static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
@@ -537,7 +575,7 @@ static u32 efx_ethtool_get_link(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	return efx->link_up;
+	return efx->link_state.up;
 }
 
 static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
@@ -562,7 +600,8 @@ static int efx_ethtool_get_eeprom(struct net_device *net_dev,
 	rc = mutex_lock_interruptible(&efx->spi_lock);
 	if (rc)
 		return rc;
-	rc = falcon_spi_read(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
+	rc = falcon_spi_read(efx, spi,
+			     eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
 			     eeprom->len, &len, buf);
 	mutex_unlock(&efx->spi_lock);
 
@@ -585,7 +624,8 @@ static int efx_ethtool_set_eeprom(struct net_device *net_dev,
 	rc = mutex_lock_interruptible(&efx->spi_lock);
 	if (rc)
 		return rc;
-	rc = falcon_spi_write(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
+	rc = falcon_spi_write(efx, spi,
+			      eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
 			      eeprom->len, &len, buf);
 	mutex_unlock(&efx->spi_lock);
 
@@ -618,6 +658,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
 	coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
 
+	coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
+	coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
+
 	return 0;
 }
 
@@ -656,13 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 	}
 
 	efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
-
-	/* Reset channel to pick up new moderation value.  Note that
-	 * this may change the value of the irq_moderation field
-	 * (e.g. to allow for hardware timer granularity).
-	 */
 	efx_for_each_channel(channel, efx)
-		falcon_set_int_moderation(channel);
+		efx->type->push_irq_moderation(channel);
 
 	return 0;
 }
@@ -671,8 +709,12 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 				      struct ethtool_pauseparam *pause)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	enum efx_fc_type wanted_fc;
+	enum efx_fc_type wanted_fc, old_fc;
+	u32 old_adv;
 	bool reset;
+	int rc = 0;
+
+	mutex_lock(&efx->mac_lock);
 
 	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
 		     (pause->tx_pause ? EFX_FC_TX : 0) |
@@ -680,14 +722,14 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 
 	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
 		EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto out;
 	}
 
-	if (!(efx->phy_op->mmds & MDIO_DEVS_AN) &&
-	    (wanted_fc & EFX_FC_AUTO)) {
-		EFX_LOG(efx, "PHY does not support flow control "
-			"autonegotiation\n");
-		return -EINVAL;
+	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+		EFX_LOG(efx, "Autonegotiation is disabled\n");
+		rc = -EINVAL;
+		goto out;
 	}
 
 	/* TX flow control may automatically turn itself off if the
@@ -697,27 +739,40 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 	 * and fix it by cycling transmit flow control on this end. */
 	reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
 	if (EFX_WORKAROUND_11482(efx) && reset) {
-		if (falcon_rev(efx) >= FALCON_REV_B0) {
+		if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
 			/* Recover by resetting the EM block */
-			if (efx->link_up)
-				falcon_drain_tx_fifo(efx);
+			falcon_stop_nic_stats(efx);
+			falcon_drain_tx_fifo(efx);
+			efx->mac_op->reconfigure(efx);
+			falcon_start_nic_stats(efx);
 		} else {
 			/* Schedule a reset to recover */
 			efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
 		}
 	}
 
-	/* Try to push the pause parameters */
-	mutex_lock(&efx->mac_lock);
+	old_adv = efx->link_advertising;
+	old_fc = efx->wanted_fc;
+	efx_link_set_wanted_fc(efx, wanted_fc);
+	if (efx->link_advertising != old_adv ||
+	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+		rc = efx->phy_op->reconfigure(efx);
+		if (rc) {
+			EFX_ERR(efx, "Unable to advertise requested flow "
+				"control setting\n");
+			goto out;
+		}
+	}
 
-	efx->wanted_fc = wanted_fc;
-	if (efx->phy_op->mmds & MDIO_DEVS_AN)
-		mdio45_ethtool_spauseparam_an(&efx->mdio, pause);
-	__efx_reconfigure_port(efx);
+	/* Reconfigure the MAC. The PHY *may* generate a link state change event
+	 * if the user just changed the advertised capabilities, but there's no
+	 * harm doing this twice */
+	efx->mac_op->reconfigure(efx);
 
+out:
 	mutex_unlock(&efx->mac_lock);
 
-	return 0;
+	return rc;
 }
 
 static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
@@ -731,6 +786,50 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
 }
 
 
+static void efx_ethtool_get_wol(struct net_device *net_dev,
+				struct ethtool_wolinfo *wol)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	return efx->type->get_wol(efx, wol);
+}
+
+
+static int efx_ethtool_set_wol(struct net_device *net_dev,
+			       struct ethtool_wolinfo *wol)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	return efx->type->set_wol(efx, wol->wolopts);
+}
+
+extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	enum reset_type method;
+	enum {
+		ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
+					   ETH_RESET_OFFLOAD | ETH_RESET_MAC)
+	};
+
+	/* Check for minimal reset flags */
+	if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE)
+		return -EINVAL;
+	*flags ^= ETH_RESET_EFX_INVISIBLE;
+	method = RESET_TYPE_INVISIBLE;
+
+	if (*flags & ETH_RESET_PHY) {
+		*flags ^= ETH_RESET_PHY;
+		method = RESET_TYPE_ALL;
+	}
+
+	if ((*flags & efx->type->reset_world_flags) ==
+	    efx->type->reset_world_flags) {
+		*flags ^= efx->type->reset_world_flags;
+		method = RESET_TYPE_WORLD;
+	}
+
+	return efx_reset(efx, method);
+}
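
Editor's note: efx_ethtool_reset() above follows the flag-consumption style of the ethtool reset operation: it clears from *flags the components it will actually reset and leaves the rest set, so the caller can see what went unhandled. A toy version of that logic follows; the constants are placeholders, not the real ETH_RESET_* values.

#include <stdio.h>

/* Placeholder component bits, standing in for ETH_RESET_* flags. */
#define R_DMA		0x01
#define R_FILTER	0x02
#define R_MAC		0x04
#define R_PHY		0x08
#define R_MINIMAL	(R_DMA | R_FILTER | R_MAC)

enum method { METHOD_NONE, METHOD_INVISIBLE, METHOD_ALL };

static enum method pick_reset(unsigned int *flags)
{
	enum method m;

	/* Refuse requests that cannot cover at least the minimal reset. */
	if ((*flags & R_MINIMAL) != R_MINIMAL)
		return METHOD_NONE;
	*flags ^= R_MINIMAL;		/* consume what we will handle */
	m = METHOD_INVISIBLE;

	if (*flags & R_PHY) {
		*flags ^= R_PHY;	/* upgrade to a full reset */
		m = METHOD_ALL;
	}
	return m;
}

int main(void)
{
	unsigned int flags = R_MINIMAL | R_PHY | 0x100; /* 0x100 is unknown */
	enum method m = pick_reset(&flags);

	/* The unknown 0x100 bit is left set, telling the caller that this
	 * handler did not cover it. */
	printf("method=%d, leftover flags=%#x\n", m, flags);
	return 0;
}
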
+
 const struct ethtool_ops efx_ethtool_ops = {
 	.get_settings		= efx_ethtool_get_settings,
 	.set_settings		= efx_ethtool_set_settings,
@@ -747,11 +846,13 @@ const struct ethtool_ops efx_ethtool_ops = {
 	.get_rx_csum		= efx_ethtool_get_rx_csum,
 	.set_rx_csum		= efx_ethtool_set_rx_csum,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= ethtool_op_set_tx_csum,
+	/* Need to enable/disable IPv6 too */
+	.set_tx_csum		= efx_ethtool_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
 	.get_tso		= ethtool_op_get_tso,
-	.set_tso		= ethtool_op_set_tso,
+	/* Need to enable/disable TSO-IPv6 too */
+	.set_tso		= efx_ethtool_set_tso,
 	.get_flags		= ethtool_op_get_flags,
 	.set_flags		= ethtool_op_set_flags,
 	.get_sset_count		= efx_ethtool_get_sset_count,
@@ -759,4 +860,7 @@ const struct ethtool_ops efx_ethtool_ops = {
 	.get_strings		= efx_ethtool_get_strings,
 	.phys_id		= efx_ethtool_phys_id,
 	.get_ethtool_stats	= efx_ethtool_get_stats,
+	.get_wol                = efx_ethtool_get_wol,
+	.set_wol                = efx_ethtool_set_wol,
+	.reset			= efx_ethtool_reset,
 };
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
deleted file mode 100644
index 295ead403356..000000000000
--- a/drivers/net/sfc/ethtool.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_ETHTOOL_H
-#define EFX_ETHTOOL_H
-
-#include "net_driver.h"
-
-/*
- * Ethtool support
- */
-
-extern int efx_ethtool_get_settings(struct net_device *net_dev,
-				    struct ethtool_cmd *ecmd);
-extern int efx_ethtool_set_settings(struct net_device *net_dev,
-				    struct ethtool_cmd *ecmd);
-
-extern const struct ethtool_ops efx_ethtool_ops;
-
-#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c049364aec46..17afcd26e870 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -14,66 +14,20 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
 #include <linux/mii.h>
 #include "net_driver.h"
 #include "bitfield.h"
 #include "efx.h"
 #include "mac.h"
 #include "spi.h"
-#include "falcon.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
 #include "mdio_10g.h"
 #include "phy.h"
-#include "boards.h"
 #include "workarounds.h"
 
-/* Falcon hardware control.
- * Falcon is the internal codename for the SFC4000 controller that is
- * present in SFE400X evaluation boards
- */
-
-/**
- * struct falcon_nic_data - Falcon NIC state
- * @next_buffer_table: First available buffer table id
- * @pci_dev2: The secondary PCI device if present
- * @i2c_data: Operations and state for I2C bit-bashing algorithm
- * @int_error_count: Number of internal errors seen recently
- * @int_error_expire: Time at which error count will be expired
- */
-struct falcon_nic_data {
-	unsigned next_buffer_table;
-	struct pci_dev *pci_dev2;
-	struct i2c_algo_bit_data i2c_data;
-
-	unsigned int_error_count;
-	unsigned long int_error_expire;
-};
-
-/**************************************************************************
- *
- * Configurable values
- *
- **************************************************************************
- */
-
-static int disable_dma_stats;
-
-/* This is set to 16 for a good reason.  In summary, if larger than
- * 16, the descriptor cache holds more than a default socket
- * buffer's worth of packets (for UDP we can only have at most one
- * socket buffer's worth outstanding).  This combined with the fact
- * that we only get 1 TX event per descriptor cache means the NIC
- * goes idle.
- */
-#define TX_DC_ENTRIES 16
-#define TX_DC_ENTRIES_ORDER 0
-#define TX_DC_BASE 0x130000
-
-#define RX_DC_ENTRIES 64
-#define RX_DC_ENTRIES_ORDER 2
-#define RX_DC_BASE 0x100000
+/* Hardware control for SFC4000 (aka Falcon). */
 
 static const unsigned int
 /* "Large" EEPROM device: Atmel AT25640 or similar
@@ -89,104 +43,6 @@ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
 		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
 		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
 
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO used increases past this
- * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-static int rx_xoff_thresh_bytes = -1;
-module_param(rx_xoff_thresh_bytes, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark, send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-static int rx_xon_thresh_bytes = -1;
-module_param(rx_xon_thresh_bytes, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
-/* TX descriptor ring size - min 512 max 4k */
-#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
-#define FALCON_TXD_RING_SIZE 1024
-#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
-
-/* RX descriptor ring size - min 512 max 4k */
-#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
-#define FALCON_RXD_RING_SIZE 1024
-#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
-
-/* Event queue size - max 32k */
-#define FALCON_EVQ_ORDER EVQ_SIZE_4K
-#define FALCON_EVQ_SIZE 4096
-#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
-
-/* If FALCON_MAX_INT_ERRORS internal errors occur within
- * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
- * disable it.
- */
-#define FALCON_INT_ERROR_EXPIRE 3600
-#define FALCON_MAX_INT_ERRORS 5
-
-/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
- */
-#define FALCON_FLUSH_INTERVAL 10
-#define FALCON_FLUSH_POLL_COUNT 100
-
-/**************************************************************************
- *
- * Falcon constants
- *
- **************************************************************************
- */
-
-/* DMA address mask */
-#define FALCON_DMA_MASK DMA_BIT_MASK(46)
-
-/* TX DMA length mask (13-bit) */
-#define FALCON_TX_DMA_MASK (4096 - 1)
-
-/* Size and alignment of special buffers (4KB) */
-#define FALCON_BUF_SIZE 4096
-
-/* Dummy SRAM size code */
-#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
-
-#define FALCON_IS_DUAL_FUNC(efx)		\
-	(falcon_rev(efx) < FALCON_REV_B0)
-
-/**************************************************************************
- *
- * Falcon hardware access
- *
- **************************************************************************/
-
-/* Read the current event from the event queue */
-static inline efx_qword_t *falcon_event(struct efx_channel *channel,
-					unsigned int index)
-{
-	return (((efx_qword_t *) (channel->eventq.addr)) + index);
-}
-
-/* See if an event is present
- *
- * We check both the high and low dword of the event for all ones.  We
- * wrote all ones when we cleared the event, and no valid event can
- * have all ones in either its high or low dwords.  This approach is
- * robust against reordering.
- *
- * Note that using a single 64-bit comparison is incorrect; even
- * though the CPU read will be atomic, the DMA write may not be.
- */
-static inline int falcon_event_present(efx_qword_t *event)
-{
-	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
-		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
-}
-
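
For readers skimming the removed comment above, a minimal standalone sketch of the all-ones sentinel check it describes (illustrative only, not part of this commit; the event layout is simplified to two explicit 32-bit halves):

/* Illustrative sketch: a cleared event slot is pre-filled with all ones,
 * and no valid event has an all-ones half, so each 32-bit half is tested
 * separately instead of comparing the whole 64 bits at once (the DMA
 * write of the two halves need not be atomic). */
#include <stdbool.h>
#include <stdint.h>

struct demo_event {
	uint32_t dword[2];	/* low and high halves of the 64-bit event */
};

bool demo_event_present(const struct demo_event *ev)
{
	return !(ev->dword[0] == 0xffffffffu || ev->dword[1] == 0xffffffffu);
}
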
 /**************************************************************************
  *
  * I2C bus - this is a bit-bashing interface using GPIO pins
@@ -200,9 +56,9 @@ static void falcon_setsda(void *data, int state)
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
 }
 
 static void falcon_setscl(void *data, int state)
@@ -210,9 +66,9 @@ static void falcon_setscl(void *data, int state)
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
 }
 
 static int falcon_getsda(void *data)
@@ -220,8 +76,8 @@ static int falcon_getsda(void *data)
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	return EFX_OWORD_FIELD(reg, GPIO3_IN);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
 }
 
 static int falcon_getscl(void *data)
@@ -229,8 +85,8 @@ static int falcon_getscl(void *data)
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	return EFX_OWORD_FIELD(reg, GPIO0_IN);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
 }
 
 static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -243,1115 +99,39 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
 	.timeout	= DIV_ROUND_UP(HZ, 20),
 };
 
-/**************************************************************************
- *
- * Falcon special buffer handling
- * Special buffers are used for event queues and the TX and RX
- * descriptor rings.
- *
- *************************************************************************/
-
-/*
- * Initialise a Falcon special buffer
- *
- * This will define a buffer (previously allocated via
- * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
- * it to be used for event queues, descriptor rings etc.
- */
-static void
-falcon_init_special_buffer(struct efx_nic *efx,
-			   struct efx_special_buffer *buffer)
-{
-	efx_qword_t buf_desc;
-	int index;
-	dma_addr_t dma_addr;
-	int i;
-
-	EFX_BUG_ON_PARANOID(!buffer->addr);
-
-	/* Write buffer descriptors to NIC */
-	for (i = 0; i < buffer->entries; i++) {
-		index = buffer->index + i;
-		dma_addr = buffer->dma_addr + (i * 4096);
-		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
-			index, (unsigned long long)dma_addr);
-		EFX_POPULATE_QWORD_4(buf_desc,
-				     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
-				     BUF_ADR_REGION, 0,
-				     BUF_ADR_FBUF, (dma_addr >> 12),
-				     BUF_OWNER_ID_FBUF, 0);
-		falcon_write_sram(efx, &buf_desc, index);
-	}
-}
-
-/* Unmaps a buffer from Falcon and clears the buffer table entries */
-static void
-falcon_fini_special_buffer(struct efx_nic *efx,
-			   struct efx_special_buffer *buffer)
-{
-	efx_oword_t buf_tbl_upd;
-	unsigned int start = buffer->index;
-	unsigned int end = (buffer->index + buffer->entries - 1);
-
-	if (!buffer->entries)
-		return;
-
-	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
-		buffer->index, buffer->index + buffer->entries - 1);
-
-	EFX_POPULATE_OWORD_4(buf_tbl_upd,
-			     BUF_UPD_CMD, 0,
-			     BUF_CLR_CMD, 1,
-			     BUF_CLR_END_ID, end,
-			     BUF_CLR_START_ID, start);
-	falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
-}
-
-/*
- * Allocate a new Falcon special buffer
- *
- * This allocates memory for a new buffer, clears it and allocates a
- * new buffer ID range.  It does not write into Falcon's buffer table.
- *
- * This call will allocate 4KB buffers, since Falcon can't use 8KB
- * buffers for event queues and descriptor rings.
- */
-static int falcon_alloc_special_buffer(struct efx_nic *efx,
-				       struct efx_special_buffer *buffer,
-				       unsigned int len)
-{
-	struct falcon_nic_data *nic_data = efx->nic_data;
-
-	len = ALIGN(len, FALCON_BUF_SIZE);
-
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
-	if (!buffer->addr)
-		return -ENOMEM;
-	buffer->len = len;
-	buffer->entries = len / FALCON_BUF_SIZE;
-	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
-
-	/* All zeros is a potentially valid event so memset to 0xff */
-	memset(buffer->addr, 0xff, len);
-
-	/* Select new buffer ID */
-	buffer->index = nic_data->next_buffer_table;
-	nic_data->next_buffer_table += buffer->entries;
-
-	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-		"(virt %p phys %llx)\n", buffer->index,
-		buffer->index + buffer->entries - 1,
-		(u64)buffer->dma_addr, len,
-		buffer->addr, (u64)virt_to_phys(buffer->addr));
-
-	return 0;
-}
-
-static void falcon_free_special_buffer(struct efx_nic *efx,
-				       struct efx_special_buffer *buffer)
-{
-	if (!buffer->addr)
-		return;
-
-	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-		"(virt %p phys %llx)\n", buffer->index,
-		buffer->index + buffer->entries - 1,
-		(u64)buffer->dma_addr, buffer->len,
-		buffer->addr, (u64)virt_to_phys(buffer->addr));
-
-	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
-			    buffer->dma_addr);
-	buffer->addr = NULL;
-	buffer->entries = 0;
-}
-
-/**************************************************************************
- *
- * Falcon generic buffer handling
- * These buffers are used for interrupt status and MAC stats
- *
- **************************************************************************/
-
-static int falcon_alloc_buffer(struct efx_nic *efx,
-			       struct efx_buffer *buffer, unsigned int len)
-{
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
-	if (!buffer->addr)
-		return -ENOMEM;
-	buffer->len = len;
-	memset(buffer->addr, 0, len);
-	return 0;
-}
-
-static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
-{
-	if (buffer->addr) {
-		pci_free_consistent(efx->pci_dev, buffer->len,
-				    buffer->addr, buffer->dma_addr);
-		buffer->addr = NULL;
-	}
-}
-
-/**************************************************************************
- *
- * Falcon TX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified transmit descriptor in the TX
- * descriptor queue belonging to the specified channel.
- */
-static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
-					       unsigned int index)
-{
-	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
-}
-
-/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
-static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
-{
-	unsigned write_ptr;
-	efx_dword_t reg;
-
-	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
-	EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
-	falcon_writel_page(tx_queue->efx, &reg,
-			   TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
-}
-
-
-/* For each entry inserted into the software descriptor ring, create a
- * descriptor in the hardware TX descriptor ring (in host memory), and
- * write a doorbell.
- */
-void falcon_push_buffers(struct efx_tx_queue *tx_queue)
-{
-
-	struct efx_tx_buffer *buffer;
-	efx_qword_t *txd;
-	unsigned write_ptr;
-
-	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-
-	do {
-		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
-		buffer = &tx_queue->buffer[write_ptr];
-		txd = falcon_tx_desc(tx_queue, write_ptr);
-		++tx_queue->write_count;
-
-		/* Create TX descriptor ring entry */
-		EFX_POPULATE_QWORD_5(*txd,
-				     TX_KER_PORT, 0,
-				     TX_KER_CONT, buffer->continuation,
-				     TX_KER_BYTE_CNT, buffer->len,
-				     TX_KER_BUF_REGION, 0,
-				     TX_KER_BUF_ADR, buffer->dma_addr);
-	} while (tx_queue->write_count != tx_queue->insert_count);
-
-	wmb(); /* Ensure descriptors are written before they are fetched */
-	falcon_notify_tx_desc(tx_queue);
-}
-
-/* Allocate hardware resources for a TX queue */
-int falcon_probe_tx(struct efx_tx_queue *tx_queue)
-{
-	struct efx_nic *efx = tx_queue->efx;
-	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
-					   FALCON_TXD_RING_SIZE *
-					   sizeof(efx_qword_t));
-}
-
-void falcon_init_tx(struct efx_tx_queue *tx_queue)
-{
-	efx_oword_t tx_desc_ptr;
-	struct efx_nic *efx = tx_queue->efx;
-
-	tx_queue->flushed = false;
-
-	/* Pin TX descriptor ring */
-	falcon_init_special_buffer(efx, &tx_queue->txd);
-
-	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
-			      TX_DESCQ_EN, 1,
-			      TX_ISCSI_DDIG_EN, 0,
-			      TX_ISCSI_HDIG_EN, 0,
-			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
-			      TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
-			      TX_DESCQ_OWNER_ID, 0,
-			      TX_DESCQ_LABEL, tx_queue->queue,
-			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
-			      TX_DESCQ_TYPE, 0,
-			      TX_NON_IP_DROP_DIS_B0, 1);
-
-	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
-	}
-
-	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
-			   tx_queue->queue);
-
-	if (falcon_rev(efx) < FALCON_REV_B0) {
-		efx_oword_t reg;
-
-		/* Only 128 bits in this register */
-		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
-
-		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
-		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
-			clear_bit_le(tx_queue->queue, (void *)&reg);
-		else
-			set_bit_le(tx_queue->queue, (void *)&reg);
-		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
-	}
-}
-
-static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
-{
-	struct efx_nic *efx = tx_queue->efx;
-	efx_oword_t tx_flush_descq;
-
-	/* Post a flush command */
-	EFX_POPULATE_OWORD_2(tx_flush_descq,
-			     TX_FLUSH_DESCQ_CMD, 1,
-			     TX_FLUSH_DESCQ, tx_queue->queue);
-	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
-}
-
-void falcon_fini_tx(struct efx_tx_queue *tx_queue)
-{
-	struct efx_nic *efx = tx_queue->efx;
-	efx_oword_t tx_desc_ptr;
-
-	/* The queue should have been flushed */
-	WARN_ON(!tx_queue->flushed);
-
-	/* Remove TX descriptor ring from card */
-	EFX_ZERO_OWORD(tx_desc_ptr);
-	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
-			   tx_queue->queue);
-
-	/* Unpin TX descriptor ring */
-	falcon_fini_special_buffer(efx, &tx_queue->txd);
-}
-
-/* Free buffers backing TX queue */
-void falcon_remove_tx(struct efx_tx_queue *tx_queue)
-{
-	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
-}
-
-/**************************************************************************
- *
- * Falcon RX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
-					       unsigned int index)
-{
-	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
-}
-
-/* This creates an entry in the RX descriptor queue */
-static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
-					unsigned index)
-{
-	struct efx_rx_buffer *rx_buf;
-	efx_qword_t *rxd;
-
-	rxd = falcon_rx_desc(rx_queue, index);
-	rx_buf = efx_rx_buffer(rx_queue, index);
-	EFX_POPULATE_QWORD_3(*rxd,
-			     RX_KER_BUF_SIZE,
-			     rx_buf->len -
-			     rx_queue->efx->type->rx_buffer_padding,
-			     RX_KER_BUF_REGION, 0,
-			     RX_KER_BUF_ADR, rx_buf->dma_addr);
-}
-
-/* This writes to the RX_DESC_WPTR register for the specified receive
- * descriptor ring.
- */
-void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
-{
-	efx_dword_t reg;
-	unsigned write_ptr;
-
-	while (rx_queue->notified_count != rx_queue->added_count) {
-		falcon_build_rx_desc(rx_queue,
-				     rx_queue->notified_count &
-				     FALCON_RXD_RING_MASK);
-		++rx_queue->notified_count;
-	}
-
-	wmb();
-	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
-	EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
-	falcon_writel_page(rx_queue->efx, &reg,
-			   RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
-}
-
-int falcon_probe_rx(struct efx_rx_queue *rx_queue)
-{
-	struct efx_nic *efx = rx_queue->efx;
-	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
-					   FALCON_RXD_RING_SIZE *
-					   sizeof(efx_qword_t));
-}
-
-void falcon_init_rx(struct efx_rx_queue *rx_queue)
-{
-	efx_oword_t rx_desc_ptr;
-	struct efx_nic *efx = rx_queue->efx;
-	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
-	bool iscsi_digest_en = is_b0;
-
-	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
-		rx_queue->queue, rx_queue->rxd.index,
-		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
-
-	rx_queue->flushed = false;
-
-	/* Pin RX descriptor ring */
-	falcon_init_special_buffer(efx, &rx_queue->rxd);
-
-	/* Push RX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(rx_desc_ptr,
-			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
-			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
-			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
-			      RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
-			      RX_DESCQ_OWNER_ID, 0,
-			      RX_DESCQ_LABEL, rx_queue->queue,
-			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
-			      RX_DESCQ_TYPE, 0 /* kernel queue */ ,
-			      /* For >=B0 this is scatter so disable */
-			      RX_DESCQ_JUMBO, !is_b0,
-			      RX_DESCQ_EN, 1);
-	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
-			   rx_queue->queue);
-}
-
-static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
-{
-	struct efx_nic *efx = rx_queue->efx;
-	efx_oword_t rx_flush_descq;
-
-	/* Post a flush command */
-	EFX_POPULATE_OWORD_2(rx_flush_descq,
-			     RX_FLUSH_DESCQ_CMD, 1,
-			     RX_FLUSH_DESCQ, rx_queue->queue);
-	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
-}
-
-void falcon_fini_rx(struct efx_rx_queue *rx_queue)
-{
-	efx_oword_t rx_desc_ptr;
-	struct efx_nic *efx = rx_queue->efx;
-
-	/* The queue should already have been flushed */
-	WARN_ON(!rx_queue->flushed);
-
-	/* Remove RX descriptor ring from card */
-	EFX_ZERO_OWORD(rx_desc_ptr);
-	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
-			   rx_queue->queue);
-
-	/* Unpin RX descriptor ring */
-	falcon_fini_special_buffer(efx, &rx_queue->rxd);
-}
-
-/* Free buffers backing RX queue */
-void falcon_remove_rx(struct efx_rx_queue *rx_queue)
-{
-	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
-}
-
-/**************************************************************************
- *
- * Falcon event queue processing
- * Event queues are processed by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Update a channel's event queue's read pointer (RPTR) register
- *
- * This writes the EVQ_RPTR_REG register for the specified channel's
- * event queue.
- *
- * Note that EVQ_RPTR_REG contains the index of the "last read" event,
- * whereas channel->eventq_read_ptr contains the index of the "next to
- * read" event.
- */
-void falcon_eventq_read_ack(struct efx_channel *channel)
-{
-	efx_dword_t reg;
-	struct efx_nic *efx = channel->efx;
-
-	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
-	falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
-			    channel->channel);
-}
-
-/* Use HW to insert a SW defined event */
-void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
-{
-	efx_oword_t drv_ev_reg;
-
-	EFX_POPULATE_OWORD_2(drv_ev_reg,
-			     DRV_EV_QID, channel->channel,
-			     DRV_EV_DATA,
-			     EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
-	falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
-}
-
-/* Handle a transmit completion event
- *
- * Falcon batches TX completion events; the message we receive is of
- * the form "complete all TX events up to this index".
- */
-static void falcon_handle_tx_event(struct efx_channel *channel,
-				   efx_qword_t *event)
-{
-	unsigned int tx_ev_desc_ptr;
-	unsigned int tx_ev_q_label;
-	struct efx_tx_queue *tx_queue;
-	struct efx_nic *efx = channel->efx;
-
-	if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
-		/* Transmit completion */
-		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
-		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
-		channel->irq_mod_score +=
-			(tx_ev_desc_ptr - tx_queue->read_count) &
-			efx->type->txd_ring_mask;
-		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
-	} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
-		/* Rewrite the FIFO write pointer */
-		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
-
-		if (efx_dev_registered(efx))
-			netif_tx_lock(efx->net_dev);
-		falcon_notify_tx_desc(tx_queue);
-		if (efx_dev_registered(efx))
-			netif_tx_unlock(efx->net_dev);
-	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
-		   EFX_WORKAROUND_10727(efx)) {
-		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
-	} else {
-		EFX_ERR(efx, "channel %d unexpected TX event "
-			EFX_QWORD_FMT"\n", channel->channel,
-			EFX_QWORD_VAL(*event));
-	}
-}
-
-/* Detect errors included in the rx_evt_pkt_ok bit. */
-static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
-				    const efx_qword_t *event,
-				    bool *rx_ev_pkt_ok,
-				    bool *discard)
-{
-	struct efx_nic *efx = rx_queue->efx;
-	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
-	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
-	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
-	bool rx_ev_other_err, rx_ev_pause_frm;
-	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
-	unsigned rx_ev_pkt_type;
-
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
-	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
-	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
-	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
-	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
-						 RX_EV_BUF_OWNER_ID_ERR);
-	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
-	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
-						  RX_EV_IP_HDR_CHKSUM_ERR);
-	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
-						   RX_EV_TCP_UDP_CHKSUM_ERR);
-	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
-	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
-			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
-	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
-
-	/* Every error apart from tobe_disc and pause_frm */
-	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
-			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
-			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
-
-	/* Count errors that are not in MAC stats.  Ignore expected
-	 * checksum errors during self-test. */
-	if (rx_ev_frm_trunc)
-		++rx_queue->channel->n_rx_frm_trunc;
-	else if (rx_ev_tobe_disc)
-		++rx_queue->channel->n_rx_tobe_disc;
-	else if (!efx->loopback_selftest) {
-		if (rx_ev_ip_hdr_chksum_err)
-			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
-		else if (rx_ev_tcp_udp_chksum_err)
-			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
-	}
-	if (rx_ev_ip_frag_err)
-		++rx_queue->channel->n_rx_ip_frag_err;
-
-	/* The frame must be discarded if any of these are true. */
-	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
-		    rx_ev_tobe_disc | rx_ev_pause_frm);
-
-	/* TOBE_DISC is expected on unicast mismatches; don't print out an
-	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
-	 * to a FIFO overflow.
-	 */
-#ifdef EFX_ENABLE_DEBUG
-	if (rx_ev_other_err) {
-		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
-			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
-			    rx_queue->queue, EFX_QWORD_VAL(*event),
-			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
-			    rx_ev_ip_hdr_chksum_err ?
-			    " [IP_HDR_CHKSUM_ERR]" : "",
-			    rx_ev_tcp_udp_chksum_err ?
-			    " [TCP_UDP_CHKSUM_ERR]" : "",
-			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
-			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
-			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
-			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
-			    rx_ev_pause_frm ? " [PAUSE]" : "");
-	}
-#endif
-}
-
-/* Handle receive events that are not in-order. */
-static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
-				       unsigned index)
-{
-	struct efx_nic *efx = rx_queue->efx;
-	unsigned expected, dropped;
-
-	expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
-		   FALCON_RXD_RING_MASK);
-	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
-		dropped, index, expected);
-
-	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
-			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
-}
-
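
The dropped count computed above is simply the forward distance between two indices on a power-of-two ring; a standalone sketch of that arithmetic (illustrative only, not part of this commit; the ring size and values are made up):

#include <stdio.h>

#define DEMO_RING_SIZE 1024u			/* must be a power of two */
#define DEMO_RING_MASK (DEMO_RING_SIZE - 1)

/* Forward distance from 'expected' to 'index' on the ring, i.e. how many
 * completions were skipped if the hardware reports 'index' where the
 * driver expected 'expected'. */
unsigned int demo_ring_distance(unsigned int index, unsigned int expected)
{
	return (index + DEMO_RING_SIZE - expected) & DEMO_RING_MASK;
}

int main(void)
{
	/* Wrap-around case: expected 1020, hardware reports 4 -> 8 dropped. */
	printf("%u\n", demo_ring_distance(4, 1020));
	return 0;
}
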
-/* Handle a packet received event
- *
- * Falcon silicon gives a "discard" flag if it's a unicast packet with the
- * wrong destination address.
- * Also, "is multicast" and "matches multicast filter" flags can be used to
- * discard non-matching multicast packets.
- */
-static void falcon_handle_rx_event(struct efx_channel *channel,
-				   const efx_qword_t *event)
-{
-	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
-	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
-	unsigned expected_ptr;
-	bool rx_ev_pkt_ok, discard = false, checksummed;
-	struct efx_rx_queue *rx_queue;
-	struct efx_nic *efx = channel->efx;
-
-	/* Basic packet information */
-	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
-	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
-	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
-	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
-	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
-
-	rx_queue = &efx->rx_queue[channel->channel];
-
-	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	if (unlikely(rx_ev_desc_ptr != expected_ptr))
-		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
-
-	if (likely(rx_ev_pkt_ok)) {
-		/* If packet is marked as OK and packet type is TCP/IPv4 or
-		 * UDP/IPv4, then we can rely on the hardware checksum.
-		 */
-		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
-	} else {
-		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
-					&discard);
-		checksummed = false;
-	}
-
-	/* Detect multicast packets that didn't match the filter */
-	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
-	if (rx_ev_mcast_pkt) {
-		unsigned int rx_ev_mcast_hash_match =
-			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
-
-		if (unlikely(!rx_ev_mcast_hash_match))
-			discard = true;
-	}
-
-	channel->irq_mod_score += 2;
-
-	/* Handle received packet */
-	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
-		      checksummed, discard);
-}
-
-/* Global events are basically PHY events */
-static void falcon_handle_global_event(struct efx_channel *channel,
-				       efx_qword_t *event)
-{
-	struct efx_nic *efx = channel->efx;
-	bool handled = false;
-
-	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
-	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
-	    EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
-	    EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
-		efx->phy_op->clear_interrupt(efx);
-		queue_work(efx->workqueue, &efx->phy_work);
-		handled = true;
-	}
-
-	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
-	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
-		queue_work(efx->workqueue, &efx->mac_work);
-		handled = true;
-	}
-
-	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
-		EFX_ERR(efx, "channel %d seen global RX_RESET "
-			"event. Resetting.\n", channel->channel);
-
-		atomic_inc(&efx->rx_reset);
-		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
-				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
-		handled = true;
-	}
-
-	if (!handled)
-		EFX_ERR(efx, "channel %d unknown global event "
-			EFX_QWORD_FMT "\n", channel->channel,
-			EFX_QWORD_VAL(*event));
-}
-
-static void falcon_handle_driver_event(struct efx_channel *channel,
-				       efx_qword_t *event)
-{
-	struct efx_nic *efx = channel->efx;
-	unsigned int ev_sub_code;
-	unsigned int ev_sub_data;
-
-	ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-	ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
-
-	switch (ev_sub_code) {
-	case TX_DESCQ_FLS_DONE_EV_DECODE:
-		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
-			  channel->channel, ev_sub_data);
-		break;
-	case RX_DESCQ_FLS_DONE_EV_DECODE:
-		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
-			  channel->channel, ev_sub_data);
-		break;
-	case EVQ_INIT_DONE_EV_DECODE:
-		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
-			channel->channel, ev_sub_data);
-		break;
-	case SRM_UPD_DONE_EV_DECODE:
-		EFX_TRACE(efx, "channel %d SRAM update done\n",
-			  channel->channel);
-		break;
-	case WAKE_UP_EV_DECODE:
-		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
-			  channel->channel, ev_sub_data);
-		break;
-	case TIMER_EV_DECODE:
-		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
-			  channel->channel, ev_sub_data);
-		break;
-	case RX_RECOVERY_EV_DECODE:
-		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
-			"Resetting.\n", channel->channel);
-		atomic_inc(&efx->rx_reset);
-		efx_schedule_reset(efx,
-				   EFX_WORKAROUND_6555(efx) ?
-				   RESET_TYPE_RX_RECOVERY :
-				   RESET_TYPE_DISABLE);
-		break;
-	case RX_DSC_ERROR_EV_DECODE:
-		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
-			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
-		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
-		break;
-	case TX_DSC_ERROR_EV_DECODE:
-		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
-			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
-		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
-		break;
-	default:
-		EFX_TRACE(efx, "channel %d unknown driver event code %d "
-			  "data %04x\n", channel->channel, ev_sub_code,
-			  ev_sub_data);
-		break;
-	}
-}
-
-int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
-{
-	unsigned int read_ptr;
-	efx_qword_t event, *p_event;
-	int ev_code;
-	int rx_packets = 0;
-
-	read_ptr = channel->eventq_read_ptr;
-
-	do {
-		p_event = falcon_event(channel, read_ptr);
-		event = *p_event;
-
-		if (!falcon_event_present(&event))
-			/* End of events */
-			break;
-
-		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
-			  channel->channel, EFX_QWORD_VAL(event));
-
-		/* Clear this event by marking it all ones */
-		EFX_SET_QWORD(*p_event);
-
-		ev_code = EFX_QWORD_FIELD(event, EV_CODE);
-
-		switch (ev_code) {
-		case RX_IP_EV_DECODE:
-			falcon_handle_rx_event(channel, &event);
-			++rx_packets;
-			break;
-		case TX_IP_EV_DECODE:
-			falcon_handle_tx_event(channel, &event);
-			break;
-		case DRV_GEN_EV_DECODE:
-			channel->eventq_magic
-				= EFX_QWORD_FIELD(event, EVQ_MAGIC);
-			EFX_LOG(channel->efx, "channel %d received generated "
-				"event "EFX_QWORD_FMT"\n", channel->channel,
-				EFX_QWORD_VAL(event));
-			break;
-		case GLOBAL_EV_DECODE:
-			falcon_handle_global_event(channel, &event);
-			break;
-		case DRIVER_EV_DECODE:
-			falcon_handle_driver_event(channel, &event);
-			break;
-		default:
-			EFX_ERR(channel->efx, "channel %d unknown event type %d"
-				" (data " EFX_QWORD_FMT ")\n", channel->channel,
-				ev_code, EFX_QWORD_VAL(event));
-		}
-
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-
-	} while (rx_packets < rx_quota);
-
-	channel->eventq_read_ptr = read_ptr;
-	return rx_packets;
-}
-
-void falcon_set_int_moderation(struct efx_channel *channel)
+static void falcon_push_irq_moderation(struct efx_channel *channel)
 {
 	efx_dword_t timer_cmd;
 	struct efx_nic *efx = channel->efx;
 
 	/* Set timer register */
 	if (channel->irq_moderation) {
-		/* Round to resolution supported by hardware.  The value we
-		 * program is based at 0.  So actual interrupt moderation
-		 * achieved is ((x + 1) * res).
-		 */
-		channel->irq_moderation -= (channel->irq_moderation %
-					    FALCON_IRQ_MOD_RESOLUTION);
-		if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
-			channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
 		EFX_POPULATE_DWORD_2(timer_cmd,
-				     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
-				     TIMER_VAL,
-				     channel->irq_moderation /
-				     FALCON_IRQ_MOD_RESOLUTION - 1);
+				     FRF_AB_TC_TIMER_MODE,
+				     FFE_BB_TIMER_MODE_INT_HLDOFF,
+				     FRF_AB_TC_TIMER_VAL,
+				     channel->irq_moderation - 1);
 	} else {
 		EFX_POPULATE_DWORD_2(timer_cmd,
-				     TIMER_MODE, TIMER_MODE_DIS,
-				     TIMER_VAL, 0);
+				     FRF_AB_TC_TIMER_MODE,
+				     FFE_BB_TIMER_MODE_DIS,
+				     FRF_AB_TC_TIMER_VAL, 0);
 	}
-	falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
-				  channel->channel);
-
+	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
+	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+			       channel->channel);
 }
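
As a worked illustration of the rounding rule in the removed comment (the moderation actually achieved is (x + 1) * resolution), here is a minimal sketch; the resolution is passed in as a parameter rather than using the real FALCON_IRQ_MOD_RESOLUTION value, which is not shown in this hunk:

/* Illustrative sketch of the old moderation rounding: the requested
 * interval (in the same units as the hardware resolution) is rounded
 * down to a multiple of the resolution, clamped to at least one step,
 * and the programmed value is one less than the step count, so the
 * interval achieved by hardware is (programmed + 1) * resolution. */
unsigned int demo_timer_val(unsigned int requested, unsigned int resolution)
{
	requested -= requested % resolution;
	if (requested < resolution)
		requested = resolution;
	return requested / resolution - 1;
}

/* Example with resolution 5: requested 12 rounds down to 10, the value
 * programmed is 1, and the hardware achieves (1 + 1) * 5 = 10. */
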
 
-/* Allocate buffer table entries for event queue */
-int falcon_probe_eventq(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	unsigned int evq_size;
-
-	evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
-	return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
-}
+static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
 
-void falcon_init_eventq(struct efx_channel *channel)
+static void falcon_prepare_flush(struct efx_nic *efx)
 {
-	efx_oword_t evq_ptr;
-	struct efx_nic *efx = channel->efx;
-
-	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
-		channel->channel, channel->eventq.index,
-		channel->eventq.index + channel->eventq.entries - 1);
-
-	/* Pin event queue buffer */
-	falcon_init_special_buffer(efx, &channel->eventq);
+	falcon_deconfigure_mac_wrapper(efx);
 
-	/* Fill event queue with all ones (i.e. empty events) */
-	memset(channel->eventq.addr, 0xff, channel->eventq.len);
-
-	/* Push event queue to card */
-	EFX_POPULATE_OWORD_3(evq_ptr,
-			     EVQ_EN, 1,
-			     EVQ_SIZE, FALCON_EVQ_ORDER,
-			     EVQ_BUF_BASE_ID, channel->eventq.index);
-	falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
-			   channel->channel);
-
-	falcon_set_int_moderation(channel);
-}
-
-void falcon_fini_eventq(struct efx_channel *channel)
-{
-	efx_oword_t eventq_ptr;
-	struct efx_nic *efx = channel->efx;
-
-	/* Remove event queue from card */
-	EFX_ZERO_OWORD(eventq_ptr);
-	falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
-			   channel->channel);
-
-	/* Unpin event queue */
-	falcon_fini_special_buffer(efx, &channel->eventq);
-}
-
-/* Free buffers backing event queue */
-void falcon_remove_eventq(struct efx_channel *channel)
-{
-	falcon_free_special_buffer(channel->efx, &channel->eventq);
-}
-
-
-/* Generates a test event on the event queue.  A subsequent call to
- * process_eventq() should pick up the event and place the value of
- * "magic" into channel->eventq_magic;
- */
-void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
-{
-	efx_qword_t test_event;
-
-	EFX_POPULATE_QWORD_2(test_event,
-			     EV_CODE, DRV_GEN_EV_DECODE,
-			     EVQ_MAGIC, magic);
-	falcon_generate_event(channel, &test_event);
-}
-
-void falcon_sim_phy_event(struct efx_nic *efx)
-{
-	efx_qword_t phy_event;
-
-	EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
-	if (EFX_IS10G(efx))
-		EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
-	else
-		EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
-
-	falcon_generate_event(&efx->channel[0], &phy_event);
-}
-
-/**************************************************************************
- *
- * Flush handling
- *
- **************************************************************************/
-
-
-static void falcon_poll_flush_events(struct efx_nic *efx)
-{
-	struct efx_channel *channel = &efx->channel[0];
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
-	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;
-
-	do {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		bool ev_failed;
-
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		if (ev_code == DRIVER_EV_DECODE &&
-		    ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
-			ev_queue = EFX_QWORD_FIELD(*event,
-						   DRIVER_EV_TX_DESCQ_ID);
-			if (ev_queue < EFX_TX_QUEUE_COUNT) {
-				tx_queue = efx->tx_queue + ev_queue;
-				tx_queue->flushed = true;
-			}
-		} else if (ev_code == DRIVER_EV_DECODE &&
-			   ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
-			ev_queue = EFX_QWORD_FIELD(*event,
-						   DRIVER_EV_RX_DESCQ_ID);
-			ev_failed = EFX_QWORD_FIELD(*event,
-						    DRIVER_EV_RX_FLUSH_FAIL);
-			if (ev_queue < efx->n_rx_queues) {
-				rx_queue = efx->rx_queue + ev_queue;
-
-				/* retry the rx flush */
-				if (ev_failed)
-					falcon_flush_rx_queue(rx_queue);
-				else
-					rx_queue->flushed = true;
-			}
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	} while (read_ptr != end_ptr);
-}
-
-/* Handle tx and rx flushes at the same time, since they run in
- * parallel in the hardware and there's no reason for us to
- * serialise them */
-int falcon_flush_queues(struct efx_nic *efx)
-{
-	struct efx_rx_queue *rx_queue;
-	struct efx_tx_queue *tx_queue;
-	int i;
-	bool outstanding;
-
-	/* Issue flush requests */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		tx_queue->flushed = false;
-		falcon_flush_tx_queue(tx_queue);
-	}
-	efx_for_each_rx_queue(rx_queue, efx) {
-		rx_queue->flushed = false;
-		falcon_flush_rx_queue(rx_queue);
-	}
-
-	/* Poll the evq looking for flush completions. Since we're not pushing
-	 * any more rx or tx descriptors at this point, we're in no danger of
-	 * overflowing the evq whilst we wait */
-	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
-		msleep(FALCON_FLUSH_INTERVAL);
-		falcon_poll_flush_events(efx);
-
-		/* Check if every queue has been successfully flushed */
-		outstanding = false;
-		efx_for_each_tx_queue(tx_queue, efx)
-			outstanding |= !tx_queue->flushed;
-		efx_for_each_rx_queue(rx_queue, efx)
-			outstanding |= !rx_queue->flushed;
-		if (!outstanding)
-			return 0;
-	}
-
-	/* Mark the queues as all flushed. We're going to return failure
-	 * leading to a reset, or fake up success anyway. "flushed" now
-	 * indicates that we tried to flush. */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (!tx_queue->flushed)
-			EFX_ERR(efx, "tx queue %d flush command timed out\n",
-				tx_queue->queue);
-		tx_queue->flushed = true;
-	}
-	efx_for_each_rx_queue(rx_queue, efx) {
-		if (!rx_queue->flushed)
-			EFX_ERR(efx, "rx queue %d flush command timed out\n",
-				rx_queue->queue);
-		rx_queue->flushed = true;
-	}
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	return -ETIMEDOUT;
-}
-
-/**************************************************************************
- *
- * Falcon hardware interrupts
- * The hardware interrupt handler does very little work; all the event
- * queue processing is carried out by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Enable/disable/generate Falcon interrupts */
-static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
-				     int force)
-{
-	efx_oword_t int_en_reg_ker;
-
-	EFX_POPULATE_OWORD_2(int_en_reg_ker,
-			     KER_INT_KER, force,
-			     DRV_INT_EN_KER, enabled);
-	falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
-}
-
-void falcon_enable_interrupts(struct efx_nic *efx)
-{
-	efx_oword_t int_adr_reg_ker;
-	struct efx_channel *channel;
-
-	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
-	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
-
-	/* Program address */
-	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
-			     NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
-			     INT_ADR_KER, efx->irq_status.dma_addr);
-	falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
-
-	/* Enable interrupts */
-	falcon_interrupts(efx, 1, 0);
-
-	/* Force processing of all the channels to get the EVQ RPTRs up to
-	   date */
-	efx_for_each_channel(channel, efx)
-		efx_schedule_channel(channel);
-}
-
-void falcon_disable_interrupts(struct efx_nic *efx)
-{
-	/* Disable interrupts */
-	falcon_interrupts(efx, 0, 0);
-}
-
-/* Generate a Falcon test interrupt
- * Interrupt must already have been enabled, otherwise nasty things
- * may happen.
- */
-void falcon_generate_interrupt(struct efx_nic *efx)
-{
-	falcon_interrupts(efx, 1, 1);
+	/* Wait for the TX and RX FIFOs to get to the next packet boundary
+	 * (~1ms without back-pressure), then to drain the remainder of the
+	 * FIFOs at data path speeds (negligible), with a healthy margin. */
+	msleep(10);
 }
 
 /* Acknowledge a legacy interrupt from Falcon
@@ -1364,113 +144,17 @@ void falcon_generate_interrupt(struct efx_nic *efx)
  *
  * NB most hardware supports MSI interrupts
  */
-static inline void falcon_irq_ack_a1(struct efx_nic *efx)
-{
-	efx_dword_t reg;
-
-	EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
-	falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
-	falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
-}
-
-/* Process a fatal interrupt
- * Disable bus mastering ASAP and schedule a reset
- */
-static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
+inline void falcon_irq_ack_a1(struct efx_nic *efx)
 {
-	struct falcon_nic_data *nic_data = efx->nic_data;
-	efx_oword_t *int_ker = efx->irq_status.addr;
-	efx_oword_t fatal_intr;
-	int error, mem_perr;
-
-	falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
-	error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
-
-	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
-		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
-		EFX_OWORD_VAL(fatal_intr),
-		error ? "disabling bus mastering" : "no recognised error");
-	if (error == 0)
-		goto out;
-
-	/* If this is a memory parity error dump which blocks are offending */
-	mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
-	if (mem_perr) {
-		efx_oword_t reg;
-		falcon_read(efx, &reg, MEM_STAT_REG_KER);
-		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
-			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
-	}
-
-	/* Disable both devices */
-	pci_clear_master(efx->pci_dev);
-	if (FALCON_IS_DUAL_FUNC(efx))
-		pci_clear_master(nic_data->pci_dev2);
-	falcon_disable_interrupts(efx);
-
-	/* Count errors and reset or disable the NIC accordingly */
-	if (nic_data->int_error_count == 0 ||
-	    time_after(jiffies, nic_data->int_error_expire)) {
-		nic_data->int_error_count = 0;
-		nic_data->int_error_expire =
-			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
-	}
-	if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
-		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
-		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
-	} else {
-		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
-			"NIC will be disabled\n");
-		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
-	}
-out:
-	return IRQ_HANDLED;
-}
-
-/* Handle a legacy interrupt from Falcon
- * Acknowledges the interrupt and schedules event queue processing.
- */
-static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
-{
-	struct efx_nic *efx = dev_id;
-	efx_oword_t *int_ker = efx->irq_status.addr;
-	irqreturn_t result = IRQ_NONE;
-	struct efx_channel *channel;
 	efx_dword_t reg;
-	u32 queues;
-	int syserr;
 
-	/* Read the ISR which also ACKs the interrupts */
-	falcon_readl(efx, &reg, INT_ISR0_B0);
-	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
-
-	/* Check to see if we have a serious error condition */
-	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
-	if (unlikely(syserr))
-		return falcon_fatal_interrupt(efx);
-
-	/* Schedule processing of any interrupting queues */
-	efx_for_each_channel(channel, efx) {
-		if ((queues & 1) ||
-		    falcon_event_present(
-			    falcon_event(channel, channel->eventq_read_ptr))) {
-			efx_schedule_channel(channel);
-			result = IRQ_HANDLED;
-		}
-		queues >>= 1;
-	}
-
-	if (result == IRQ_HANDLED) {
-		efx->last_irq_cpu = raw_smp_processor_id();
-		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
-			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-	}
-
-	return result;
+	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
+	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
+	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
 }
 
 
-static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
 	efx_oword_t *int_ker = efx->irq_status.addr;
@@ -1491,15 +175,15 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
 	/* Check to see if we have a serious error condition */
-	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
+	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 	if (unlikely(syserr))
-		return falcon_fatal_interrupt(efx);
+		return efx_nic_fatal_interrupt(efx);
 
 	/* Determine interrupting queues, clear interrupt status
 	 * register and acknowledge the device interrupt.
 	 */
-	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
-	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
+	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
+	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
 	EFX_ZERO_OWORD(*int_ker);
 	wmb(); /* Ensure the vector is cleared before interrupt ack */
 	falcon_irq_ack_a1(efx);
@@ -1515,126 +199,6 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 
 	return IRQ_HANDLED;
 }
-
-/* Handle an MSI interrupt from Falcon
- *
- * Handle an MSI hardware interrupt.  This routine schedules event
- * queue processing.  No interrupt acknowledgement cycle is necessary.
- * Also, we never need to check that the interrupt is for us, since
- * MSI interrupts cannot be shared.
- */
-static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
-{
-	struct efx_channel *channel = dev_id;
-	struct efx_nic *efx = channel->efx;
-	efx_oword_t *int_ker = efx->irq_status.addr;
-	int syserr;
-
-	efx->last_irq_cpu = raw_smp_processor_id();
-	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
-		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
-
-	/* Check to see if we have a serious error condition */
-	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
-	if (unlikely(syserr))
-		return falcon_fatal_interrupt(efx);
-
-	/* Schedule processing of the channel */
-	efx_schedule_channel(channel);
-
-	return IRQ_HANDLED;
-}
-
-
-/* Setup RSS indirection table.
- * This maps from the hash value of the packet to RXQ
- */
-static void falcon_setup_rss_indir_table(struct efx_nic *efx)
-{
-	int i = 0;
-	unsigned long offset;
-	efx_dword_t dword;
-
-	if (falcon_rev(efx) < FALCON_REV_B0)
-		return;
-
-	for (offset = RX_RSS_INDIR_TBL_B0;
-	     offset < RX_RSS_INDIR_TBL_B0 + 0x800;
-	     offset += 0x10) {
-		EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
-				     i % efx->n_rx_queues);
-		falcon_writel(efx, &dword, offset);
-		i++;
-	}
-}
-
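
The removed helper spreads hash buckets across RX queues round-robin; a standalone sketch of the same fill pattern (illustrative only, not part of this commit; the 128-entry count follows from the 0x800-byte range and 0x10 stride above):

#define DEMO_RSS_ENTRIES 128	/* 0x800 bytes of registers / 0x10 stride */

/* Fill an RSS indirection table so that hash bucket i is served by RX
 * queue (i % n_rx_queues). */
void demo_fill_rss_table(unsigned int *table, unsigned int n_rx_queues)
{
	unsigned int i;

	for (i = 0; i < DEMO_RSS_ENTRIES; i++)
		table[i] = i % n_rx_queues;
}
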
-/* Hook interrupt handler(s)
- * Try MSI and then legacy interrupts.
- */
-int falcon_init_interrupt(struct efx_nic *efx)
-{
-	struct efx_channel *channel;
-	int rc;
-
-	if (!EFX_INT_MODE_USE_MSI(efx)) {
-		irq_handler_t handler;
-		if (falcon_rev(efx) >= FALCON_REV_B0)
-			handler = falcon_legacy_interrupt_b0;
-		else
-			handler = falcon_legacy_interrupt_a1;
-
-		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
-				 efx->name, efx);
-		if (rc) {
-			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
-				efx->pci_dev->irq);
-			goto fail1;
-		}
-		return 0;
-	}
-
-	/* Hook MSI or MSI-X interrupt */
-	efx_for_each_channel(channel, efx) {
-		rc = request_irq(channel->irq, falcon_msi_interrupt,
-				 IRQF_PROBE_SHARED, /* Not shared */
-				 channel->name, channel);
-		if (rc) {
-			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
-			goto fail2;
-		}
-	}
-
-	return 0;
-
- fail2:
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, channel);
- fail1:
-	return rc;
-}
-
-void falcon_fini_interrupt(struct efx_nic *efx)
-{
-	struct efx_channel *channel;
-	efx_oword_t reg;
-
-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx) {
-		if (channel->irq)
-			free_irq(channel->irq, channel);
-	}
-
-	/* ACK legacy interrupt */
-	if (falcon_rev(efx) >= FALCON_REV_B0)
-		falcon_read(efx, &reg, INT_ISR0_B0);
-	else
-		falcon_irq_ack_a1(efx);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
-		free_irq(efx->legacy_irq, efx);
-}
-
 /**************************************************************************
  *
  * EEPROM/flash
@@ -1647,8 +211,8 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 static int falcon_spi_poll(struct efx_nic *efx)
 {
 	efx_oword_t reg;
-	falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
-	return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
+	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
+	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
 }
 
 /* Wait for SPI command completion */
@@ -1678,11 +242,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
 	}
 }
 
-int falcon_spi_cmd(const struct efx_spi_device *spi,
+int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
 		   unsigned int command, int address,
 		   const void *in, void *out, size_t len)
 {
-	struct efx_nic *efx = spi->efx;
 	bool addressed = (address >= 0);
 	bool reading = (out != NULL);
 	efx_oword_t reg;
@@ -1700,27 +263,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
 
 	/* Program address register, if we have an address */
 	if (addressed) {
-		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
-		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
+		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
 	}
 
 	/* Program data register, if we have data */
 	if (in != NULL) {
 		memcpy(&reg, in, len);
-		falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
+		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
 	}
 
 	/* Issue read/write command */
 	EFX_POPULATE_OWORD_7(reg,
-			     EE_SPI_HCMD_CMD_EN, 1,
-			     EE_SPI_HCMD_SF_SEL, spi->device_id,
-			     EE_SPI_HCMD_DABCNT, len,
-			     EE_SPI_HCMD_READ, reading,
-			     EE_SPI_HCMD_DUBCNT, 0,
-			     EE_SPI_HCMD_ADBCNT,
+			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
+			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
+			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
+			     FRF_AB_EE_SPI_HCMD_READ, reading,
+			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
+			     FRF_AB_EE_SPI_HCMD_ADBCNT,
 			     (addressed ? spi->addr_len : 0),
-			     EE_SPI_HCMD_ENC, command);
-	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
+			     FRF_AB_EE_SPI_HCMD_ENC, command);
+	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
 
 	/* Wait for read/write to complete */
 	rc = falcon_spi_wait(efx);
@@ -1729,7 +292,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
 
 	/* Read data */
 	if (out != NULL) {
-		falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
+		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
 		memcpy(out, &reg, len);
 	}
 
@@ -1751,15 +314,15 @@ efx_spi_munge_command(const struct efx_spi_device *spi,
 }
 
 /* Wait up to 10 ms for buffered write completion */
-int falcon_spi_wait_write(const struct efx_spi_device *spi)
+int
+falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
 {
-	struct efx_nic *efx = spi->efx;
 	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
 	u8 status;
 	int rc;
 
 	for (;;) {
-		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
+		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 				    &status, sizeof(status));
 		if (rc)
 			return rc;
@@ -1775,8 +338,8 @@ int falcon_spi_wait_write(const struct efx_spi_device *spi)
 	}
 }
 
-int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
-		    size_t len, size_t *retlen, u8 *buffer)
+int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
+		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
 {
 	size_t block_len, pos = 0;
 	unsigned int command;
@@ -1786,7 +349,7 @@ int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
 		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
 
 		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
-		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
+		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
 				    buffer + pos, block_len);
 		if (rc)
 			break;
@@ -1805,8 +368,9 @@ int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
 	return rc;
 }
 
-int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
-		     size_t len, size_t *retlen, const u8 *buffer)
+int
+falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
 {
 	u8 verify_buffer[FALCON_SPI_MAX_LEN];
 	size_t block_len, pos = 0;
@@ -1814,24 +378,24 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
 	int rc = 0;
 
 	while (pos < len) {
-		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
+		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 		if (rc)
 			break;
 
 		block_len = min(len - pos,
 				falcon_spi_write_limit(spi, start + pos));
 		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
-		rc = falcon_spi_cmd(spi, command, start + pos,
+		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 				    buffer + pos, NULL, block_len);
 		if (rc)
 			break;
 
-		rc = falcon_spi_wait_write(spi);
+		rc = falcon_spi_wait_write(efx, spi);
 		if (rc)
 			break;
 
 		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
-		rc = falcon_spi_cmd(spi, command, start + pos,
+		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 				    NULL, verify_buffer, block_len);
 		if (memcmp(verify_buffer, buffer + pos, block_len)) {
 			rc = -EIO;
@@ -1860,60 +424,70 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
  **************************************************************************
  */
 
-static int falcon_reset_macs(struct efx_nic *efx)
+static void falcon_push_multicast_hash(struct efx_nic *efx)
 {
-	efx_oword_t reg;
+	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
+	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
+}
+
+static void falcon_reset_macs(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	efx_oword_t reg, mac_ctrl;
 	int count;
 
-	if (falcon_rev(efx) < FALCON_REV_B0) {
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
 		/* It's not safe to use GLB_CTL_REG to reset the
 		 * macs, so instead use the internal MAC resets
 		 */
 		if (!EFX_IS10G(efx)) {
-			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
-			falcon_write(efx, &reg, GM_CFG1_REG);
+			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
+			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
 			udelay(1000);
 
-			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
-			falcon_write(efx, &reg, GM_CFG1_REG);
+			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
+			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
 			udelay(1000);
-			return 0;
+			return;
 		} else {
-			EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
-			falcon_write(efx, &reg, XM_GLB_CFG_REG);
+			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
+			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
 
 			for (count = 0; count < 10000; count++) {
-				falcon_read(efx, &reg, XM_GLB_CFG_REG);
-				if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
-					return 0;
+				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
+				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
+				    0)
+					return;
 				udelay(10);
 			}
 
 			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
-			return -ETIMEDOUT;
 		}
 	}
 
-	/* MAC stats will fail whilst the TX fifo is draining. Serialise
-	 * the drain sequence with the statistics fetch */
-	efx_stats_disable(efx);
+	/* MAC stats will fail whilst the TX fifo is draining */
+	WARN_ON(nic_data->stats_disable_count == 0);
 
-	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
-	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
+	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
+	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
 
-	falcon_read(efx, &reg, GLB_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
-	EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
-	EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
-	falcon_write(efx, &reg, GLB_CTL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_GLB_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
+	efx_writeo(efx, &reg, FR_AB_GLB_CTL);
 
 	count = 0;
 	while (1) {
-		falcon_read(efx, &reg, GLB_CTL_REG_KER);
-		if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
-		    !EFX_OWORD_FIELD(reg, RST_XGRX) &&
-		    !EFX_OWORD_FIELD(reg, RST_EM)) {
+		efx_reado(efx, &reg, FR_AB_GLB_CTL);
+		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
+		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
+		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
 			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
 				count);
 			break;
@@ -1926,55 +500,50 @@ static int falcon_reset_macs(struct efx_nic *efx)
 		udelay(10);
 	}
 
-	efx_stats_enable(efx);
-
-	/* If we've reset the EM block and the link is up, then
-	 * we'll have to kick the XAUI link so the PHY can recover */
-	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
-		falcon_reset_xaui(efx);
-
-	return 0;
+	/* Ensure the correct MAC is selected before statistics
+	 * are re-enabled by the caller */
+	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
 }
 
 void falcon_drain_tx_fifo(struct efx_nic *efx)
 {
 	efx_oword_t reg;
 
-	if ((falcon_rev(efx) < FALCON_REV_B0) ||
+	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
 	    (efx->loopback_mode != LOOPBACK_NONE))
 		return;
 
-	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
 	/* There is no point in draining more than once */
-	if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
+	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
 		return;
 
 	falcon_reset_macs(efx);
 }
 
-void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
 	efx_oword_t reg;
 
-	if (falcon_rev(efx) < FALCON_REV_B0)
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
 		return;
 
 	/* Isolate the MAC -> RX */
-	falcon_read(efx, &reg, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
-	falcon_write(efx, &reg, RX_CFG_REG_KER);
+	efx_reado(efx, &reg, FR_AZ_RX_CFG);
+	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
+	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
 
-	if (!efx->link_up)
-		falcon_drain_tx_fifo(efx);
+	/* Isolate TX -> MAC */
+	falcon_drain_tx_fifo(efx);
 }
 
 void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 {
+	struct efx_link_state *link_state = &efx->link_state;
 	efx_oword_t reg;
 	int link_speed;
-	bool tx_fc;
 
-	switch (efx->link_speed) {
+	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
 	case 1000:  link_speed = 2; break;
 	case 100:   link_speed = 1; break;
@@ -1985,75 +554,139 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	 * indefinitely held and TX queue can be flushed at any point
 	 * while the link is down. */
 	EFX_POPULATE_OWORD_5(reg,
-			     MAC_XOFF_VAL, 0xffff /* max pause time */,
-			     MAC_BCAD_ACPT, 1,
-			     MAC_UC_PROM, efx->promiscuous,
-			     MAC_LINK_STATUS, 1, /* always set */
-			     MAC_SPEED, link_speed);
+			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
+			     FRF_AB_MAC_BCAD_ACPT, 1,
+			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
+			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
+			     FRF_AB_MAC_SPEED, link_speed);
 	/* On B0, MAC backpressure can be disabled and packets get
 	 * discarded. */
-	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
-				    !efx->link_up);
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
+				    !link_state->up);
 	}
 
-	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
+	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
 
 	/* Restore the multicast hash registers. */
-	falcon_set_multicast_hash(efx);
-
-	/* Transmission of pause frames when RX crosses the threshold is
-	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
-	 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
-	tx_fc = !!(efx->link_fc & EFX_FC_TX);
-	falcon_read(efx, &reg, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
+	falcon_push_multicast_hash(efx);
 
+	efx_reado(efx, &reg, FR_AZ_RX_CFG);
+	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
+	 * initialisation but it may read back as 0) */
+	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
 	/* Unisolate the MAC -> RX */
-	if (falcon_rev(efx) >= FALCON_REV_B0)
-		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
-	falcon_write(efx, &reg, RX_CFG_REG_KER);
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
 }
 
-int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
+static void falcon_stats_request(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t reg;
-	u32 *dma_done;
-	int i;
 
-	if (disable_dma_stats)
-		return 0;
+	WARN_ON(nic_data->stats_pending);
+	WARN_ON(nic_data->stats_disable_count);
 
-	/* Statistics fetch will fail if the MAC is in TX drain */
-	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		efx_oword_t temp;
-		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
-		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
-			return 0;
-	}
+	if (nic_data->stats_dma_done == NULL)
+		return;	/* no mac selected */
 
-	dma_done = (efx->stats_buffer.addr + done_offset);
-	*dma_done = FALCON_STATS_NOT_DONE;
+	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+	nic_data->stats_pending = true;
 	wmb(); /* ensure done flag is clear */
 
 	/* Initiate DMA transfer of stats */
 	EFX_POPULATE_OWORD_2(reg,
-			     MAC_STAT_DMA_CMD, 1,
-			     MAC_STAT_DMA_ADR,
+			     FRF_AB_MAC_STAT_DMA_CMD, 1,
+			     FRF_AB_MAC_STAT_DMA_ADR,
 			     efx->stats_buffer.dma_addr);
-	falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
+	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
 
-	/* Wait for transfer to complete */
-	for (i = 0; i < 400; i++) {
-		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
-			rmb(); /* Ensure the stats are valid. */
-			return 0;
-		}
-		udelay(10);
+	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
+}
+
+static void falcon_stats_complete(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
+	if (!nic_data->stats_pending)
+		return;
+
+	nic_data->stats_pending = 0;
+	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+		rmb(); /* read the done flag before the stats */
+		efx->mac_op->update_stats(efx);
+	} else {
+		EFX_ERR(efx, "timed out waiting for statistics\n");
 	}
+}
 
-	EFX_ERR(efx, "timed out waiting for statistics\n");
-	return -ETIMEDOUT;
+static void falcon_stats_timer_func(unsigned long context)
+{
+	struct efx_nic *efx = (struct efx_nic *)context;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
+	spin_lock(&efx->stats_lock);
+
+	falcon_stats_complete(efx);
+	if (nic_data->stats_disable_count == 0)
+		falcon_stats_request(efx);
+
+	spin_unlock(&efx->stats_lock);
+}
+
+static void falcon_switch_mac(struct efx_nic *efx);
+
+static bool falcon_loopback_link_poll(struct efx_nic *efx)
+{
+	struct efx_link_state old_state = efx->link_state;
+
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+	WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+	efx->link_state.fd = true;
+	efx->link_state.fc = efx->wanted_fc;
+	efx->link_state.up = true;
+
+	if (efx->loopback_mode == LOOPBACK_GMAC)
+		efx->link_state.speed = 1000;
+	else
+		efx->link_state.speed = 10000;
+
+	return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static int falcon_reconfigure_port(struct efx_nic *efx)
+{
+	int rc;
+
+	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
+
+	/* Poll the PHY link state *before* reconfiguring it. This means we
+	 * will pick up the correct speed (in loopback) to select the correct
+	 * MAC.
+	 */
+	if (LOOPBACK_INTERNAL(efx))
+		falcon_loopback_link_poll(efx);
+	else
+		efx->phy_op->poll(efx);
+
+	falcon_stop_nic_stats(efx);
+	falcon_deconfigure_mac_wrapper(efx);
+
+	falcon_switch_mac(efx);
+
+	efx->phy_op->reconfigure(efx);
+	rc = efx->mac_op->reconfigure(efx);
+	BUG_ON(rc);
+
+	falcon_start_nic_stats(efx);
+
+	/* Synchronise efx->link_state with the kernel */
+	efx_link_status_changed(efx);
+
+	return 0;
 }
 
 /**************************************************************************
@@ -2066,18 +699,18 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 /* Wait for GMII access to complete */
 static int falcon_gmii_wait(struct efx_nic *efx)
 {
-	efx_dword_t md_stat;
+	efx_oword_t md_stat;
 	int count;
 
 	/* wait up to 50ms - taken max from datasheet */
 	for (count = 0; count < 5000; count++) {
-		falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
-		if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
-			if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
-			    EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
+		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
+		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
+			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
+			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
 				EFX_ERR(efx, "error from GMII access "
-					EFX_DWORD_FMT"\n",
-					EFX_DWORD_VAL(md_stat));
+					EFX_OWORD_FMT"\n",
+					EFX_OWORD_VAL(md_stat));
 				return -EIO;
 			}
 			return 0;
@@ -2099,7 +732,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
 	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
 		    prtad, devad, addr, value);
 
-	spin_lock_bh(&efx->phy_lock);
+	mutex_lock(&efx->mdio_lock);
 
 	/* Check MDIO not currently being accessed */
 	rc = falcon_gmii_wait(efx);
@@ -2107,34 +740,35 @@ static int falcon_mdio_write(struct net_device *net_dev,
 		goto out;
 
 	/* Write the address/ID register */
-	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
-	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
 
-	EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
-	falcon_write(efx, &reg, MD_ID_REG_KER);
+	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+			     FRF_AB_MD_DEV_ADR, devad);
+	efx_writeo(efx, &reg, FR_AB_MD_ID);
 
 	/* Write data */
-	EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
-	falcon_write(efx, &reg, MD_TXD_REG_KER);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
+	efx_writeo(efx, &reg, FR_AB_MD_TXD);
 
 	EFX_POPULATE_OWORD_2(reg,
-			     MD_WRC, 1,
-			     MD_GC, 0);
-	falcon_write(efx, &reg, MD_CS_REG_KER);
+			     FRF_AB_MD_WRC, 1,
+			     FRF_AB_MD_GC, 0);
+	efx_writeo(efx, &reg, FR_AB_MD_CS);
 
 	/* Wait for data to be written */
 	rc = falcon_gmii_wait(efx);
 	if (rc) {
 		/* Abort the write operation */
 		EFX_POPULATE_OWORD_2(reg,
-				     MD_WRC, 0,
-				     MD_GC, 1);
-		falcon_write(efx, &reg, MD_CS_REG_KER);
+				     FRF_AB_MD_WRC, 0,
+				     FRF_AB_MD_GC, 1);
+		efx_writeo(efx, &reg, FR_AB_MD_CS);
 		udelay(10);
 	}
 
- out:
-	spin_unlock_bh(&efx->phy_lock);
+out:
+	mutex_unlock(&efx->mdio_lock);
 	return rc;
 }
 
@@ -2146,152 +780,139 @@ static int falcon_mdio_read(struct net_device *net_dev,
 	efx_oword_t reg;
 	int rc;
 
-	spin_lock_bh(&efx->phy_lock);
+	mutex_lock(&efx->mdio_lock);
 
 	/* Check MDIO not currently being accessed */
 	rc = falcon_gmii_wait(efx);
 	if (rc)
 		goto out;
 
-	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
-	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
 
-	EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
-	falcon_write(efx, &reg, MD_ID_REG_KER);
+	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+			     FRF_AB_MD_DEV_ADR, devad);
+	efx_writeo(efx, &reg, FR_AB_MD_ID);
 
 	/* Request data to be read */
-	EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
-	falcon_write(efx, &reg, MD_CS_REG_KER);
+	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
+	efx_writeo(efx, &reg, FR_AB_MD_CS);
 
 	/* Wait for data to become available */
 	rc = falcon_gmii_wait(efx);
 	if (rc == 0) {
-		falcon_read(efx, &reg, MD_RXD_REG_KER);
-		rc = EFX_OWORD_FIELD(reg, MD_RXD);
+		efx_reado(efx, &reg, FR_AB_MD_RXD);
+		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
 		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
 			    prtad, devad, addr, rc);
 	} else {
 		/* Abort the read operation */
 		EFX_POPULATE_OWORD_2(reg,
-				     MD_RIC, 0,
-				     MD_GC, 1);
-		falcon_write(efx, &reg, MD_CS_REG_KER);
+				     FRF_AB_MD_RIC, 0,
+				     FRF_AB_MD_GC, 1);
+		efx_writeo(efx, &reg, FR_AB_MD_CS);
 
 		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
 			prtad, devad, addr, rc);
 	}
 
- out:
-	spin_unlock_bh(&efx->phy_lock);
+out:
+	mutex_unlock(&efx->mdio_lock);
 	return rc;
 }
 
-static int falcon_probe_phy(struct efx_nic *efx)
+static void falcon_clock_mac(struct efx_nic *efx)
 {
-	switch (efx->phy_type) {
-	case PHY_TYPE_SFX7101:
-		efx->phy_op = &falcon_sfx7101_phy_ops;
-		break;
-	case PHY_TYPE_SFT9001A:
-	case PHY_TYPE_SFT9001B:
-		efx->phy_op = &falcon_sft9001_phy_ops;
-		break;
-	case PHY_TYPE_QT2022C2:
-	case PHY_TYPE_QT2025C:
-		efx->phy_op = &falcon_xfp_phy_ops;
-		break;
-	default:
-		EFX_ERR(efx, "Unknown PHY type %d\n",
-			efx->phy_type);
-		return -1;
-	}
-
-	if (efx->phy_op->macs & EFX_XMAC)
-		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
-					(1 << LOOPBACK_XGXS) |
-					(1 << LOOPBACK_XAUI));
-	if (efx->phy_op->macs & EFX_GMAC)
-		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
-	efx->loopback_modes |= efx->phy_op->loopbacks;
+	unsigned strap_val;
+	efx_oword_t nic_stat;
 
-	return 0;
+	/* Configure the NIC generated MAC clock correctly */
+	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+	strap_val = EFX_IS10G(efx) ? 5 : 3;
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
+		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
+		efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
+	} else {
+		/* Falcon A1 does not support 1G/10G speed switching
+		 * and must not be used with a PHY that does. */
+		BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
+		       strap_val);
+	}
 }
 
-int falcon_switch_mac(struct efx_nic *efx)
+static void falcon_switch_mac(struct efx_nic *efx)
 {
 	struct efx_mac_operations *old_mac_op = efx->mac_op;
-	efx_oword_t nic_stat;
-	unsigned strap_val;
-	int rc = 0;
-
-	/* Don't try to fetch MAC stats while we're switching MACs */
-	efx_stats_disable(efx);
-
-	/* Internal loopbacks override the phy speed setting */
-	if (efx->loopback_mode == LOOPBACK_GMAC) {
-		efx->link_speed = 1000;
-		efx->link_fd = true;
-	} else if (LOOPBACK_INTERNAL(efx)) {
-		efx->link_speed = 10000;
-		efx->link_fd = true;
-	}
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	unsigned int stats_done_offset;
 
 	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+	WARN_ON(nic_data->stats_disable_count == 0);
+
 	efx->mac_op = (EFX_IS10G(efx) ?
 		       &falcon_xmac_operations : &falcon_gmac_operations);
 
-	/* Always push the NIC_STAT_REG setting even if the mac hasn't
-	 * changed, because this function is run post online reset */
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
-	strap_val = EFX_IS10G(efx) ? 5 : 3;
-	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
-		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
-		falcon_write(efx, &nic_stat, NIC_STAT_REG);
-	} else {
-		/* Falcon A1 does not support 1G/10G speed switching
-		 * and must not be used with a PHY that does. */
-		BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
-	}
+	if (EFX_IS10G(efx))
+		stats_done_offset = XgDmaDone_offset;
+	else
+		stats_done_offset = GDmaDone_offset;
+	nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
 
 	if (old_mac_op == efx->mac_op)
-		goto out;
+		return;
+
+	falcon_clock_mac(efx);
 
 	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
 	/* Not all macs support a mac-level link state */
-	efx->mac_up = true;
-
-	rc = falcon_reset_macs(efx);
-out:
-	efx_stats_enable(efx);
-	return rc;
+	efx->xmac_poll_required = false;
+	falcon_reset_macs(efx);
 }
 
 /* This call is responsible for hooking in the MAC and PHY operations */
-int falcon_probe_port(struct efx_nic *efx)
+static int falcon_probe_port(struct efx_nic *efx)
 {
 	int rc;
 
-	/* Hook in PHY operations table */
-	rc = falcon_probe_phy(efx);
-	if (rc)
-		return rc;
+	switch (efx->phy_type) {
+	case PHY_TYPE_SFX7101:
+		efx->phy_op = &falcon_sfx7101_phy_ops;
+		break;
+	case PHY_TYPE_SFT9001A:
+	case PHY_TYPE_SFT9001B:
+		efx->phy_op = &falcon_sft9001_phy_ops;
+		break;
+	case PHY_TYPE_QT2022C2:
+	case PHY_TYPE_QT2025C:
+		efx->phy_op = &falcon_qt202x_phy_ops;
+		break;
+	default:
+		EFX_ERR(efx, "Unknown PHY type %d\n",
+			efx->phy_type);
+		return -ENODEV;
+	}
 
-	/* Set up MDIO structure for PHY */
-	efx->mdio.mmds = efx->phy_op->mmds;
-	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	/* Fill out MDIO structure and loopback modes */
 	efx->mdio.mdio_read = falcon_mdio_read;
 	efx->mdio.mdio_write = falcon_mdio_write;
+	rc = efx->phy_op->probe(efx);
+	if (rc != 0)
+		return rc;
+
+	/* Initial assumption */
+	efx->link_state.speed = 10000;
+	efx->link_state.fd = true;
 
 	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-	if (falcon_rev(efx) >= FALCON_REV_B0)
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
 	else
 		efx->wanted_fc = EFX_FC_RX;
 
 	/* Allocate buffer for stats */
-	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
-				 FALCON_MAC_STATS_SIZE);
+	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+				  FALCON_MAC_STATS_SIZE);
 	if (rc)
 		return rc;
 	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
@@ -2302,40 +923,19 @@ int falcon_probe_port(struct efx_nic *efx)
 	return 0;
 }
 
-void falcon_remove_port(struct efx_nic *efx)
+static void falcon_remove_port(struct efx_nic *efx)
 {
-	falcon_free_buffer(efx, &efx->stats_buffer);
+	efx_nic_free_buffer(efx, &efx->stats_buffer);
 }
 
 /**************************************************************************
  *
- * Multicast filtering
- *
- **************************************************************************
- */
-
-void falcon_set_multicast_hash(struct efx_nic *efx)
-{
-	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
-
-	/* Broadcast packets go through the multicast hash filter.
-	 * ether_crc_le() of the broadcast address is 0xbe2612ff
-	 * so we always add bit 0xff to the mask.
-	 */
-	set_bit_le(0xff, mc_hash->byte);
-
-	falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
-	falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
-}
-
-
-/**************************************************************************
- *
  * Falcon test code
  *
  **************************************************************************/
 
-int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
+static int
+falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 {
 	struct falcon_nvconfig *nvconfig;
 	struct efx_spi_device *spi;
@@ -2351,10 +951,10 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
 	if (!region)
 		return -ENOMEM;
-	nvconfig = region + NVCONFIG_OFFSET;
+	nvconfig = region + FALCON_NVCONFIG_OFFSET;
 
 	mutex_lock(&efx->spi_lock);
-	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
+	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
 	mutex_unlock(&efx->spi_lock);
 	if (rc) {
 		EFX_ERR(efx, "Failed to read %s\n",
@@ -2367,7 +967,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
 
 	rc = -EINVAL;
-	if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
+	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
 		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
 		goto out;
 	}
@@ -2398,107 +998,54 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 	return rc;
 }
 
-/* Registers tested in the falcon register test */
-static struct {
-	unsigned address;
-	efx_oword_t mask;
-} efx_test_registers[] = {
-	{ ADR_REGION_REG_KER,
+static int falcon_test_nvram(struct efx_nic *efx)
+{
+	return falcon_read_nvram(efx, NULL);
+}
+
+static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+	{ FR_AZ_ADR_REGION,
 	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
-	{ RX_CFG_REG_KER,
+	{ FR_AZ_RX_CFG,
 	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
-	{ TX_CFG_REG_KER,
+	{ FR_AZ_TX_CFG,
 	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
-	{ TX_CFG2_REG_KER,
+	{ FR_AZ_TX_RESERVED,
 	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
-	{ MAC0_CTRL_REG_KER,
+	{ FR_AB_MAC_CTRL,
 	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
-	{ SRM_TX_DC_CFG_REG_KER,
+	{ FR_AZ_SRM_TX_DC_CFG,
 	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
-	{ RX_DC_CFG_REG_KER,
+	{ FR_AZ_RX_DC_CFG,
 	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
-	{ RX_DC_PF_WM_REG_KER,
+	{ FR_AZ_RX_DC_PF_WM,
 	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
-	{ DP_CTRL_REG,
+	{ FR_BZ_DP_CTRL,
 	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
-	{ GM_CFG2_REG,
+	{ FR_AB_GM_CFG2,
 	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
-	{ GMF_CFG0_REG,
+	{ FR_AB_GMF_CFG0,
 	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_GLB_CFG_REG,
+	{ FR_AB_XM_GLB_CFG,
 	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_TX_CFG_REG,
+	{ FR_AB_XM_TX_CFG,
 	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_RX_CFG_REG,
+	{ FR_AB_XM_RX_CFG,
 	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_RX_PARAM_REG,
+	{ FR_AB_XM_RX_PARAM,
 	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_FC_REG,
+	{ FR_AB_XM_FC,
 	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_ADR_LO_REG,
+	{ FR_AB_XM_ADR_LO,
 	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
-	{ XX_SD_CTL_REG,
+	{ FR_AB_XX_SD_CTL,
 	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
 };
 
-static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
-				     const efx_oword_t *mask)
+static int falcon_b0_test_registers(struct efx_nic *efx)
 {
-	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
-		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
-}
-
-int falcon_test_registers(struct efx_nic *efx)
-{
-	unsigned address = 0, i, j;
-	efx_oword_t mask, imask, original, reg, buf;
-
-	/* Falcon should be in loopback to isolate the XMAC from the PHY */
-	WARN_ON(!LOOPBACK_INTERNAL(efx));
-
-	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
-		address = efx_test_registers[i].address;
-		mask = imask = efx_test_registers[i].mask;
-		EFX_INVERT_OWORD(imask);
-
-		falcon_read(efx, &original, address);
-
-		/* bit sweep on and off */
-		for (j = 0; j < 128; j++) {
-			if (!EFX_EXTRACT_OWORD32(mask, j, j))
-				continue;
-
-			/* Test this testable bit can be set in isolation */
-			EFX_AND_OWORD(reg, original, mask);
-			EFX_SET_OWORD32(reg, j, j, 1);
-
-			falcon_write(efx, &reg, address);
-			falcon_read(efx, &buf, address);
-
-			if (efx_masked_compare_oword(&reg, &buf, &mask))
-				goto fail;
-
-			/* Test this testable bit can be cleared in isolation */
-			EFX_OR_OWORD(reg, original, mask);
-			EFX_SET_OWORD32(reg, j, j, 0);
-
-			falcon_write(efx, &reg, address);
-			falcon_read(efx, &buf, address);
-
-			if (efx_masked_compare_oword(&reg, &buf, &mask))
-				goto fail;
-		}
-
-		falcon_write(efx, &original, address);
-	}
-
-	return 0;
-
-fail:
-	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
-		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
-		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
-	return -EIO;
+	return efx_nic_test_registers(efx, falcon_b0_register_tests,
+				      ARRAY_SIZE(falcon_b0_register_tests));
 }
 
 /**************************************************************************
@@ -2510,13 +1057,13 @@ fail:
 
 /* Resets NIC to known state.  This routine must be called in process
  * context and is allowed to sleep. */
-int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t glb_ctl_reg_ker;
 	int rc;
 
-	EFX_LOG(efx, "performing hardware reset (%d)\n", method);
+	EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
 
 	/* Initiate device reset */
 	if (method == RESET_TYPE_WORLD) {
@@ -2526,7 +1073,7 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
 				"function prior to hardware reset\n");
 			goto fail1;
 		}
-		if (FALCON_IS_DUAL_FUNC(efx)) {
+		if (efx_nic_is_dual_func(efx)) {
 			rc = pci_save_state(nic_data->pci_dev2);
 			if (rc) {
 				EFX_ERR(efx, "failed to backup PCI state of "
@@ -2537,29 +1084,31 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
 		}
 
 		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
-				     EXT_PHY_RST_DUR, 0x7,
-				     SWRST, 1);
+				     FRF_AB_EXT_PHY_RST_DUR,
+				     FFE_AB_EXT_PHY_RST_DUR_10240US,
+				     FRF_AB_SWRST, 1);
 	} else {
-		int reset_phy = (method == RESET_TYPE_INVISIBLE ?
-				 EXCLUDE_FROM_RESET : 0);
-
 		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
-				     EXT_PHY_RST_CTL, reset_phy,
-				     PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
-				     PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
-				     PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
-				     EE_RST_CTL, EXCLUDE_FROM_RESET,
-				     EXT_PHY_RST_DUR, 0x7 /* 10ms */,
-				     SWRST, 1);
-	}
-	falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
+				     /* exclude PHY from "invisible" reset */
+				     FRF_AB_EXT_PHY_RST_CTL,
+				     method == RESET_TYPE_INVISIBLE,
+				     /* exclude EEPROM/flash and PCIe */
+				     FRF_AB_PCIE_CORE_RST_CTL, 1,
+				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
+				     FRF_AB_PCIE_SD_RST_CTL, 1,
+				     FRF_AB_EE_RST_CTL, 1,
+				     FRF_AB_EXT_PHY_RST_DUR,
+				     FFE_AB_EXT_PHY_RST_DUR_10240US,
+				     FRF_AB_SWRST, 1);
+	}
+	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
 
 	EFX_LOG(efx, "waiting for hardware reset\n");
 	schedule_timeout_uninterruptible(HZ / 20);
 
 	/* Restore PCI configuration if needed */
 	if (method == RESET_TYPE_WORLD) {
-		if (FALCON_IS_DUAL_FUNC(efx)) {
+		if (efx_nic_is_dual_func(efx)) {
 			rc = pci_restore_state(nic_data->pci_dev2);
 			if (rc) {
 				EFX_ERR(efx, "failed to restore PCI config for "
@@ -2577,8 +1126,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
 	}
 
 	/* Assert that reset complete */
-	falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
-	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
+	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
 		rc = -ETIMEDOUT;
 		EFX_ERR(efx, "timed out waiting for hardware reset\n");
 		goto fail5;
@@ -2597,6 +1146,44 @@ fail5:
 	return rc;
 }
 
+static void falcon_monitor(struct efx_nic *efx)
+{
+	bool link_changed;
+	int rc;
+
+	BUG_ON(!mutex_is_locked(&efx->mac_lock));
+
+	rc = falcon_board(efx)->type->monitor(efx);
+	if (rc) {
+		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
+			(rc == -ERANGE) ? "reported fault" : "failed");
+		efx->phy_mode |= PHY_MODE_LOW_POWER;
+		rc = __efx_reconfigure_port(efx);
+		WARN_ON(rc);
+	}
+
+	if (LOOPBACK_INTERNAL(efx))
+		link_changed = falcon_loopback_link_poll(efx);
+	else
+		link_changed = efx->phy_op->poll(efx);
+
+	if (link_changed) {
+		falcon_stop_nic_stats(efx);
+		falcon_deconfigure_mac_wrapper(efx);
+
+		falcon_switch_mac(efx);
+		rc = efx->mac_op->reconfigure(efx);
+		BUG_ON(rc);
+
+		falcon_start_nic_stats(efx);
+
+		efx_link_status_changed(efx);
+	}
+
+	if (EFX_IS10G(efx))
+		falcon_poll_xmac(efx);
+}
+
 /* Zeroes out the SRAM contents.  This routine must be called in
  * process context and is allowed to sleep.
  */
@@ -2606,16 +1193,16 @@ static int falcon_reset_sram(struct efx_nic *efx)
 	int count;
 
 	/* Set the SRAM wake/sleep GPIO appropriately. */
-	falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
-	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
-	falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
+	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
+	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
+	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
 
 	/* Initiate SRAM reset */
 	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
-			     SRAM_OOB_BT_INIT_EN, 1,
-			     SRM_NUM_BANKS_AND_BANK_SIZE, 0);
-	falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
+			     FRF_AZ_SRM_INIT_EN, 1,
+			     FRF_AZ_SRM_NB_SZ, 0);
+	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
 
 	/* Wait for SRAM reset to complete */
 	count = 0;
@@ -2626,8 +1213,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
 		schedule_timeout_uninterruptible(HZ / 50);
 
 		/* Check for reset complete */
-		falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
-		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
+		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
 			EFX_LOG(efx, "SRAM reset complete\n");
 
 			return 0;
@@ -2663,8 +1250,6 @@ static int falcon_spi_device_init(struct efx_nic *efx,
 		spi_device->block_size =
 			1 << SPI_DEV_TYPE_FIELD(device_type,
 						SPI_DEV_TYPE_BLOCK_SIZE);
-
-		spi_device->efx = efx;
 	} else {
 		spi_device = NULL;
 	}
@@ -2674,7 +1259,6 @@ static int falcon_spi_device_init(struct efx_nic *efx,
 	return 0;
 }
 
-
 static void falcon_remove_spi_devices(struct efx_nic *efx)
 {
 	kfree(efx->spi_eeprom);
@@ -2712,16 +1296,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
 		board_rev = le16_to_cpu(v2->board_revision);
 
 		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
-			__le32 fl = v3->spi_device_type[EE_SPI_FLASH];
-			__le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
-			rc = falcon_spi_device_init(efx, &efx->spi_flash,
-						    EE_SPI_FLASH,
-						    le32_to_cpu(fl));
+			rc = falcon_spi_device_init(
+				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+				le32_to_cpu(v3->spi_device_type
+					    [FFE_AB_SPI_DEVICE_FLASH]));
 			if (rc)
 				goto fail2;
-			rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
-						    EE_SPI_EEPROM,
-						    le32_to_cpu(ee));
+			rc = falcon_spi_device_init(
+				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+				le32_to_cpu(v3->spi_device_type
+					    [FFE_AB_SPI_DEVICE_EEPROM]));
 			if (rc)
 				goto fail2;
 		}
@@ -2732,7 +1316,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
 
 	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
 
-	efx_set_board_info(efx, board_rev);
+	falcon_probe_board(efx, board_rev);
 
 	kfree(nvconfig);
 	return 0;
@@ -2744,89 +1328,49 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
 	return rc;
 }
 
-/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
- * count, port speed).  Set workaround and feature flags accordingly.
- */
-static int falcon_probe_nic_variant(struct efx_nic *efx)
-{
-	efx_oword_t altera_build;
-	efx_oword_t nic_stat;
-
-	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
-	if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
-		EFX_ERR(efx, "Falcon FPGA not supported\n");
-		return -ENODEV;
-	}
-
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
-
-	switch (falcon_rev(efx)) {
-	case FALCON_REV_A0:
-	case 0xff:
-		EFX_ERR(efx, "Falcon rev A0 not supported\n");
-		return -ENODEV;
-
-	case FALCON_REV_A1:
-		if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
-			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
-			return -ENODEV;
-		}
-		break;
-
-	case FALCON_REV_B0:
-		break;
-
-	default:
-		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
-		return -ENODEV;
-	}
-
-	/* Initial assumed speed */
-	efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
-
-	return 0;
-}
-
 /* Probe all SPI devices on the NIC */
 static void falcon_probe_spi_devices(struct efx_nic *efx)
 {
 	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
 	int boot_dev;
 
-	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
-	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
+	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
 
-	if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
-		boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
-			    EE_SPI_FLASH : EE_SPI_EEPROM);
+	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
+		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
+			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
 		EFX_LOG(efx, "Booted from %s\n",
-			boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
+			boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
 	} else {
 		/* Disable VPD and set clock dividers to safe
 		 * values for initial programming. */
 		boot_dev = -1;
 		EFX_LOG(efx, "Booted from internal ASIC settings;"
 			" setting SPI config\n");
-		EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
+		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
 				     /* 125 MHz / 7 ~= 20 MHz */
-				     EE_SF_CLOCK_DIV, 7,
+				     FRF_AB_EE_SF_CLOCK_DIV, 7,
 				     /* 125 MHz / 63 ~= 2 MHz */
-				     EE_EE_CLOCK_DIV, 63);
-		falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+				     FRF_AB_EE_EE_CLOCK_DIV, 63);
+		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
 	}
 
-	if (boot_dev == EE_SPI_FLASH)
-		falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
+	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
+		falcon_spi_device_init(efx, &efx->spi_flash,
+				       FFE_AB_SPI_DEVICE_FLASH,
 				       default_flash_type);
-	if (boot_dev == EE_SPI_EEPROM)
-		falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
+	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
+		falcon_spi_device_init(efx, &efx->spi_eeprom,
+				       FFE_AB_SPI_DEVICE_EEPROM,
 				       large_eeprom_type);
 }
 
-int falcon_probe_nic(struct efx_nic *efx)
+static int falcon_probe_nic(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data;
+	struct falcon_board *board;
 	int rc;
 
 	/* Allocate storage for hardware specific data */
@@ -2835,15 +1379,33 @@ int falcon_probe_nic(struct efx_nic *efx)
 		return -ENOMEM;
 	efx->nic_data = nic_data;
 
-	/* Determine number of ports etc. */
-	rc = falcon_probe_nic_variant(efx);
-	if (rc)
+	rc = -ENODEV;
+
+	if (efx_nic_fpga_ver(efx) != 0) {
+		EFX_ERR(efx, "Falcon FPGA not supported\n");
 		goto fail1;
+	}
 
-	/* Probe secondary function if expected */
-	if (FALCON_IS_DUAL_FUNC(efx)) {
-		struct pci_dev *dev = pci_dev_get(efx->pci_dev);
+	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+		efx_oword_t nic_stat;
+		struct pci_dev *dev;
+		u8 pci_rev = efx->pci_dev->revision;
+
+		if ((pci_rev == 0xff) || (pci_rev == 0)) {
+			EFX_ERR(efx, "Falcon rev A0 not supported\n");
+			goto fail1;
+		}
+		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
+			EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
+			goto fail1;
+		}
+		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
+			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
+			goto fail1;
+		}
 
+		dev = pci_dev_get(efx->pci_dev);
 		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
 					     dev))) {
 			if (dev->bus == efx->pci_dev->bus &&
@@ -2867,7 +1429,7 @@ int falcon_probe_nic(struct efx_nic *efx)
 	}
 
 	/* Allocate memory for INT_KER */
-	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
 	if (rc)
 		goto fail4;
 	BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -2884,21 +1446,36 @@ int falcon_probe_nic(struct efx_nic *efx)
 		goto fail5;
 
 	/* Initialise I2C adapter */
-	efx->i2c_adap.owner = THIS_MODULE;
-	nic_data->i2c_data = falcon_i2c_bit_operations;
-	nic_data->i2c_data.data = efx;
-	efx->i2c_adap.algo_data = &nic_data->i2c_data;
-	efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
-	strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
-	rc = i2c_bit_add_bus(&efx->i2c_adap);
+	board = falcon_board(efx);
+	board->i2c_adap.owner = THIS_MODULE;
+	board->i2c_data = falcon_i2c_bit_operations;
+	board->i2c_data.data = efx;
+	board->i2c_adap.algo_data = &board->i2c_data;
+	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
+	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
+		sizeof(board->i2c_adap.name));
+	rc = i2c_bit_add_bus(&board->i2c_adap);
 	if (rc)
 		goto fail5;
 
+	rc = falcon_board(efx)->type->init(efx);
+	if (rc) {
+		EFX_ERR(efx, "failed to initialise board\n");
+		goto fail6;
+	}
+
+	nic_data->stats_disable_count = 1;
+	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
+		    (unsigned long)efx);
+
 	return 0;
 
+ fail6:
+	BUG_ON(i2c_del_adapter(&board->i2c_adap));
+	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
  fail5:
 	falcon_remove_spi_devices(efx);
-	falcon_free_buffer(efx, &efx->irq_status);
+	efx_nic_free_buffer(efx, &efx->irq_status);
  fail4:
  fail3:
 	if (nic_data->pci_dev2) {
@@ -2911,166 +1488,147 @@ int falcon_probe_nic(struct efx_nic *efx)
 	return rc;
 }
 
+static void falcon_init_rx_cfg(struct efx_nic *efx)
+{
+	/* Prior to Siena the RX DMA engine will split each frame at
+	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
+	 * be so large that that never happens. */
+	const unsigned huge_buf_size = (3 * 4096) >> 5;
+	/* RX control FIFO thresholds (32 entries) */
+	const unsigned ctrl_xon_thr = 20;
+	const unsigned ctrl_xoff_thr = 25;
+	/* RX data FIFO thresholds (256-byte units; size varies) */
+	int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
+	int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
+	efx_oword_t reg;
+
+	efx_reado(efx, &reg, FR_AZ_RX_CFG);
+	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+		/* Data FIFO size is 5.5K */
+		if (data_xon_thr < 0)
+			data_xon_thr = 512 >> 8;
+		if (data_xoff_thr < 0)
+			data_xoff_thr = 2048 >> 8;
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
+				    huge_buf_size);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
+	} else {
+		/* Data FIFO size is 80K; register fields moved */
+		if (data_xon_thr < 0)
+			data_xon_thr = 27648 >> 8; /* ~3*max MTU */
+		if (data_xoff_thr < 0)
+			data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
+				    huge_buf_size);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+	}
+	/* Always enable XOFF signal from RX FIFO.  We enable
+	 * or disable transmission of pause frames at the MAC. */
+	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+}
+
 /* This call performs hardware-specific global initialisation, such as
  * defining the descriptor cache sizes and number of RSS channels.
  * It does not set up any buffers, descriptor rings or event queues.
  */
-int falcon_init_nic(struct efx_nic *efx)
+static int falcon_init_nic(struct efx_nic *efx)
 {
 	efx_oword_t temp;
-	unsigned thresh;
 	int rc;
 
 	/* Use on-chip SRAM */
-	falcon_read(efx, &temp, NIC_STAT_REG);
-	EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
-	falcon_write(efx, &temp, NIC_STAT_REG);
+	efx_reado(efx, &temp, FR_AB_NIC_STAT);
+	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
+	efx_writeo(efx, &temp, FR_AB_NIC_STAT);
 
 	/* Set the source of the GMAC clock */
-	if (falcon_rev(efx) == FALCON_REV_B0) {
-		falcon_read(efx, &temp, GPIO_CTL_REG_KER);
-		EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
-		falcon_write(efx, &temp, GPIO_CTL_REG_KER);
+	if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
+		efx_reado(efx, &temp, FR_AB_GPIO_CTL);
+		EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
+		efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
 	}
 
-	/* Set buffer table mode */
-	EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
-	falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
+	/* Select the correct MAC */
+	falcon_clock_mac(efx);
 
 	rc = falcon_reset_sram(efx);
 	if (rc)
 		return rc;
 
-	/* Set positions of descriptor caches in SRAM. */
-	EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
-	falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
-	EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
-	falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
-
-	/* Set TX descriptor cache size. */
-	BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
-	EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
-	falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
-
-	/* Set RX descriptor cache size.  Set low watermark to size-8, as
-	 * this allows most efficient prefetching.
-	 */
-	BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
-	EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
-	falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
-	EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
-	falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
-
 	/* Clear the parity enables on the TX data fifos as
 	 * they produce false parity errors because of timing issues
 	 */
 	if (EFX_WORKAROUND_5129(efx)) {
-		falcon_read(efx, &temp, SPARE_REG_KER);
-		EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
-		falcon_write(efx, &temp, SPARE_REG_KER);
+		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
+		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
+		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
 	}
 
-	/* Enable all the genuinely fatal interrupts.  (They are still
-	 * masked by the overall interrupt mask, controlled by
-	 * falcon_interrupts()).
-	 *
-	 * Note: All other fatal interrupts are enabled
-	 */
-	EFX_POPULATE_OWORD_3(temp,
-			     ILL_ADR_INT_KER_EN, 1,
-			     RBUF_OWN_INT_KER_EN, 1,
-			     TBUF_OWN_INT_KER_EN, 1);
-	EFX_INVERT_OWORD(temp);
-	falcon_write(efx, &temp, FATAL_INTR_REG_KER);
-
 	if (EFX_WORKAROUND_7244(efx)) {
-		falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-		EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
-		EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
-		EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
-		EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
-		falcon_write(efx, &temp, RX_FILTER_CTL_REG);
+		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
+		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
 	}
 
-	falcon_setup_rss_indir_table(efx);
-
+	/* XXX This is documented only for Falcon A0/A1 */
 	/* Setup RX.  Wait for descriptor is broken and must
 	 * be disabled.  RXDP recovery shouldn't be needed, but is.
 	 */
-	falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
-	EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
+	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
+	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
 	if (EFX_WORKAROUND_5583(efx))
-		EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
-	falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
-
-	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
-	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
-	 */
-	falcon_read(efx, &temp, TX_CFG2_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
-	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
-	EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
-	EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
-	EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
-	/* Enable SW_EV to inherit in char driver - assume harmless here */
-	EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
-	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
-	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
-	/* Squash TX of packets of 16 bytes or less */
-	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
-		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
-	falcon_write(efx, &temp, TX_CFG2_REG_KER);
+		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
+	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
 
 	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
 	 * descriptors (which is bad).
 	 */
-	falcon_read(efx, &temp, TX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
-	falcon_write(efx, &temp, TX_CFG_REG_KER);
-
-	/* RX config */
-	falcon_read(efx, &temp, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
-	if (EFX_WORKAROUND_7575(efx))
-		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
-					(3 * 4096) / 32);
-	if (falcon_rev(efx) >= FALCON_REV_B0)
-		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
-
-	/* RX FIFO flow control thresholds */
-	thresh = ((rx_xon_thresh_bytes >= 0) ?
-		  rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
-	thresh = ((rx_xoff_thresh_bytes >= 0) ?
-		  rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
-	/* RX control FIFO thresholds [32 entries] */
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
-	falcon_write(efx, &temp, RX_CFG_REG_KER);
+	efx_reado(efx, &temp, FR_AZ_TX_CFG);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+	efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+	falcon_init_rx_cfg(efx);
 
 	/* Set destination of both TX and RX Flush events */
-	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
-		falcon_write(efx, &temp, DP_CTRL_REG);
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
 	}
 
+	efx_nic_init_common(efx);
+
 	return 0;
 }
 
-void falcon_remove_nic(struct efx_nic *efx)
+static void falcon_remove_nic(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
+	struct falcon_board *board = falcon_board(efx);
 	int rc;
 
+	board->type->fini(efx);
+
 	/* Remove I2C adapter and clear it in preparation for a retry */
-	rc = i2c_del_adapter(&efx->i2c_adap);
+	rc = i2c_del_adapter(&board->i2c_adap);
 	BUG_ON(rc);
-	memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));
+	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 
 	falcon_remove_spi_devices(efx);
-	falcon_free_buffer(efx, &efx->irq_status);
+	efx_nic_free_buffer(efx, &efx->irq_status);
 
 	falcon_reset_hw(efx, RESET_TYPE_ALL);
 
@@ -3085,12 +1643,86 @@ void falcon_remove_nic(struct efx_nic *efx)
 	efx->nic_data = NULL;
 }
 
-void falcon_update_nic_stats(struct efx_nic *efx)
+static void falcon_update_nic_stats(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t cnt;
 
-	falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
-	efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
+	if (nic_data->stats_disable_count)
+		return;
+
+	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+	efx->n_rx_nodesc_drop_cnt +=
+		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+	if (nic_data->stats_pending &&
+	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
+		nic_data->stats_pending = false;
+		rmb(); /* read the done flag before the stats */
+		efx->mac_op->update_stats(efx);
+	}
+}
+
+void falcon_start_nic_stats(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
+	spin_lock_bh(&efx->stats_lock);
+	if (--nic_data->stats_disable_count == 0)
+		falcon_stats_request(efx);
+	spin_unlock_bh(&efx->stats_lock);
+}
+
+void falcon_stop_nic_stats(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	int i;
+
+	might_sleep();
+
+	spin_lock_bh(&efx->stats_lock);
+	++nic_data->stats_disable_count;
+	spin_unlock_bh(&efx->stats_lock);
+
+	del_timer_sync(&nic_data->stats_timer);
+
+	/* Wait enough time for the most recent transfer to
+	 * complete. */
+	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
+		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+			break;
+		msleep(1);
+	}
+
+	spin_lock_bh(&efx->stats_lock);
+	falcon_stats_complete(efx);
+	spin_unlock_bh(&efx->stats_lock);
+}
+
+static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+	falcon_board(efx)->type->set_id_led(efx, mode);
+}
+
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int falcon_set_wol(struct efx_nic *efx, u32 type)
+{
+	if (type != 0)
+		return -EINVAL;
+	return 0;
 }
 
 /**************************************************************************
@@ -3100,50 +1732,91 @@ void falcon_update_nic_stats(struct efx_nic *efx)
  **************************************************************************
  */
 
-struct efx_nic_type falcon_a_nic_type = {
-	.mem_bar = 2,
+struct efx_nic_type falcon_a1_nic_type = {
+	.probe = falcon_probe_nic,
+	.remove = falcon_remove_nic,
+	.init = falcon_init_nic,
+	.fini = efx_port_dummy_op_void,
+	.monitor = falcon_monitor,
+	.reset = falcon_reset_hw,
+	.probe_port = falcon_probe_port,
+	.remove_port = falcon_remove_port,
+	.prepare_flush = falcon_prepare_flush,
+	.update_stats = falcon_update_nic_stats,
+	.start_stats = falcon_start_nic_stats,
+	.stop_stats = falcon_stop_nic_stats,
+	.set_id_led = falcon_set_id_led,
+	.push_irq_moderation = falcon_push_irq_moderation,
+	.push_multicast_hash = falcon_push_multicast_hash,
+	.reconfigure_port = falcon_reconfigure_port,
+	.get_wol = falcon_get_wol,
+	.set_wol = falcon_set_wol,
+	.resume_wol = efx_port_dummy_op_void,
+	.test_nvram = falcon_test_nvram,
+	.default_mac_ops = &falcon_xmac_operations,
+
+	.revision = EFX_REV_FALCON_A1,
 	.mem_map_size = 0x20000,
-	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
-	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
-	.buf_tbl_base = BUF_TBL_KER_A1,
-	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
-	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
-	.max_dma_mask = FALCON_DMA_MASK,
-	.tx_dma_mask = FALCON_TX_DMA_MASK,
-	.bug5391_mask = 0xf,
-	.rx_xoff_thresh = 2048,
-	.rx_xon_thresh = 512,
+	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
+	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
+	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
+	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
+	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
+	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_buffer_padding = 0x24,
 	.max_interrupt_mode = EFX_INT_MODE_MSI,
 	.phys_addr_channels = 4,
+	.tx_dc_base = 0x130000,
+	.rx_dc_base = 0x100000,
+	.offload_features = NETIF_F_IP_CSUM,
+	.reset_world_flags = ETH_RESET_IRQ,
 };
 
-struct efx_nic_type falcon_b_nic_type = {
-	.mem_bar = 2,
+struct efx_nic_type falcon_b0_nic_type = {
+	.probe = falcon_probe_nic,
+	.remove = falcon_remove_nic,
+	.init = falcon_init_nic,
+	.fini = efx_port_dummy_op_void,
+	.monitor = falcon_monitor,
+	.reset = falcon_reset_hw,
+	.probe_port = falcon_probe_port,
+	.remove_port = falcon_remove_port,
+	.prepare_flush = falcon_prepare_flush,
+	.update_stats = falcon_update_nic_stats,
+	.start_stats = falcon_start_nic_stats,
+	.stop_stats = falcon_stop_nic_stats,
+	.set_id_led = falcon_set_id_led,
+	.push_irq_moderation = falcon_push_irq_moderation,
+	.push_multicast_hash = falcon_push_multicast_hash,
+	.reconfigure_port = falcon_reconfigure_port,
+	.get_wol = falcon_get_wol,
+	.set_wol = falcon_set_wol,
+	.resume_wol = efx_port_dummy_op_void,
+	.test_registers = falcon_b0_test_registers,
+	.test_nvram = falcon_test_nvram,
+	.default_mac_ops = &falcon_xmac_operations,
+
+	.revision = EFX_REV_FALCON_B0,
 	/* Map everything up to and including the RSS indirection
 	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
 	 * requires that they not be mapped.  */
-	.mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
-	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
-	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
-	.buf_tbl_base = BUF_TBL_KER_B0,
-	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
-	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
-	.max_dma_mask = FALCON_DMA_MASK,
-	.tx_dma_mask = FALCON_TX_DMA_MASK,
-	.bug5391_mask = 0,
-	.rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
-	.rx_xon_thresh = 27648,  /* ~3*max MTU */
+	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
+			 FR_BZ_RX_INDIRECTION_TBL_STEP *
+			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
+	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_buffer_padding = 0,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
 				   * interrupt handler only supports 32
 				   * channels */
+	.tx_dc_base = 0x130000,
+	.rx_dc_base = 0x100000,
+	.offload_features = NETIF_F_IP_CSUM,
+	.reset_world_flags = ETH_RESET_IRQ,
 };
 
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
deleted file mode 100644
index 77f2e0db7ca1..000000000000
--- a/drivers/net/sfc/falcon.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_FALCON_H
-#define EFX_FALCON_H
-
-#include "net_driver.h"
-#include "efx.h"
-
-/*
- * Falcon hardware control
- */
-
-enum falcon_revision {
-	FALCON_REV_A0 = 0,
-	FALCON_REV_A1 = 1,
-	FALCON_REV_B0 = 2,
-};
-
-static inline int falcon_rev(struct efx_nic *efx)
-{
-	return efx->pci_dev->revision;
-}
-
-extern struct efx_nic_type falcon_a_nic_type;
-extern struct efx_nic_type falcon_b_nic_type;
-
-/**************************************************************************
- *
- * Externs
- *
- **************************************************************************
- */
-
-/* TX data path */
-extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
-extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
-extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
-extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
-extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
-
-/* RX data path */
-extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
-extern void falcon_init_rx(struct efx_rx_queue *rx_queue);
-extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
-extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
-extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
-
-/* Event data path */
-extern int falcon_probe_eventq(struct efx_channel *channel);
-extern void falcon_init_eventq(struct efx_channel *channel);
-extern void falcon_fini_eventq(struct efx_channel *channel);
-extern void falcon_remove_eventq(struct efx_channel *channel);
-extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
-extern void falcon_eventq_read_ack(struct efx_channel *channel);
-
-/* Ports */
-extern int falcon_probe_port(struct efx_nic *efx);
-extern void falcon_remove_port(struct efx_nic *efx);
-
-/* MAC/PHY */
-extern int falcon_switch_mac(struct efx_nic *efx);
-extern bool falcon_xaui_link_ok(struct efx_nic *efx);
-extern int falcon_dma_stats(struct efx_nic *efx,
-			    unsigned int done_offset);
-extern void falcon_drain_tx_fifo(struct efx_nic *efx);
-extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
-extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-
-/* Interrupts and test events */
-extern int falcon_init_interrupt(struct efx_nic *efx);
-extern void falcon_enable_interrupts(struct efx_nic *efx);
-extern void falcon_generate_test_event(struct efx_channel *channel,
-				       unsigned int magic);
-extern void falcon_sim_phy_event(struct efx_nic *efx);
-extern void falcon_generate_interrupt(struct efx_nic *efx);
-extern void falcon_set_int_moderation(struct efx_channel *channel);
-extern void falcon_disable_interrupts(struct efx_nic *efx);
-extern void falcon_fini_interrupt(struct efx_nic *efx);
-
-#define FALCON_IRQ_MOD_RESOLUTION 5
-
-/* Global Resources */
-extern int falcon_probe_nic(struct efx_nic *efx);
-extern int falcon_probe_resources(struct efx_nic *efx);
-extern int falcon_init_nic(struct efx_nic *efx);
-extern int falcon_flush_queues(struct efx_nic *efx);
-extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
-extern void falcon_remove_resources(struct efx_nic *efx);
-extern void falcon_remove_nic(struct efx_nic *efx);
-extern void falcon_update_nic_stats(struct efx_nic *efx);
-extern void falcon_set_multicast_hash(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-
-/* Tests */
-struct falcon_nvconfig;
-extern int falcon_read_nvram(struct efx_nic *efx,
-			     struct falcon_nvconfig *nvconfig);
-extern int falcon_test_registers(struct efx_nic *efx);
-
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- */
-
-#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
-#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
-
-/* Retrieve statistic from statistics block */
-#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
-	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
-		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
-			*((__force __le16 *)				\
-			  (efx->stats_buffer.addr +		\
-			   FALCON_STAT_OFFSET(falcon_stat))));	\
-	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
-		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
-			*((__force __le32 *)				\
-			  (efx->stats_buffer.addr +		\
-			   FALCON_STAT_OFFSET(falcon_stat))));	\
-	else							\
-		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
-			*((__force __le64 *)				\
-			  (efx->stats_buffer.addr +		\
-			   FALCON_STAT_OFFSET(falcon_stat))));	\
-	} while (0)
-
-#define FALCON_MAC_STATS_SIZE 0x100
-
-#define MAC_DATA_LBN 0
-#define MAC_DATA_WIDTH 32
-
-extern void falcon_generate_event(struct efx_channel *channel,
-				  efx_qword_t *event);
-
-#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
new file mode 100644
index 000000000000..bf0b96af5334
--- /dev/null
+++ b/drivers/net/sfc/falcon_boards.c
@@ -0,0 +1,752 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "phy.h"
+#include "efx.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Macros for unpacking the board revision */
+/* The revision info is in host byte order. */
+#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
+#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
+#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
+
+/* Board types */
+#define FALCON_BOARD_SFE4001 0x01
+#define FALCON_BOARD_SFE4002 0x02
+#define FALCON_BOARD_SFN4111T 0x51
+#define FALCON_BOARD_SFN4112F 0x52
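
As an aside, the unpacking above can be exercised on its own; the sketch below is a stand-alone, illustrative program (not part of the patch), and the revision word 0x5202 is a made-up value chosen to decode to the SFN4112F type ID with board revision A2, matching the "rev %c%d" formatting used by falcon_probe_board() at the end of this file.

/* Illustrative sketch only: the macros are duplicated from the patch
 * above so the program is self-contained. */
#include <stdio.h>

#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)

int main(void)
{
	unsigned int rev = 0x5202;	/* hypothetical: SFN4112F, rev A2 */

	/* Prints: type=0x52 rev A2 */
	printf("type=0x%02x rev %c%d\n", FALCON_BOARD_TYPE(rev),
	       'A' + FALCON_BOARD_MAJOR(rev), FALCON_BOARD_MINOR(rev));
	return 0;
}
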
+
+/*****************************************************************************
+ * Support for LM87 sensor chip used on several boards
+ */
+#define LM87_REG_ALARMS1		0x41
+#define LM87_REG_ALARMS2		0x42
+#define LM87_IN_LIMITS(nr, _min, _max)			\
+	0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
+#define LM87_AIN_LIMITS(nr, _min, _max)			\
+	0x3B + (nr), _max, 0x1A + (nr), _min
+#define LM87_TEMP_INT_LIMITS(_min, _max)		\
+	0x39, _max, 0x3A, _min
+#define LM87_TEMP_EXT1_LIMITS(_min, _max)		\
+	0x37, _max, 0x38, _min
+
+#define LM87_ALARM_TEMP_INT		0x10
+#define LM87_ALARM_TEMP_EXT1		0x20
+
+#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+
+static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+			 const u8 *reg_values)
+{
+	struct falcon_board *board = falcon_board(efx);
+	struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
+	int rc;
+
+	if (!client)
+		return -EIO;
+
+	while (*reg_values) {
+		u8 reg = *reg_values++;
+		u8 value = *reg_values++;
+		rc = i2c_smbus_write_byte_data(client, reg, value);
+		if (rc)
+			goto err;
+	}
+
+	board->hwmon_client = client;
+	return 0;
+
+err:
+	i2c_unregister_device(client);
+	return rc;
+}
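
For illustration, the LM87_*_LIMITS macros above flatten into the zero-terminated (register, value) byte stream that the while (*reg_values) loop walks two bytes at a time. The sketch below is a stand-alone user-space mock of that expansion and walk, with no I2C involved.

/* Illustrative sketch only: one limits macro duplicated from the patch
 * above so the program is self-contained. */
#include <stdio.h>

#define LM87_TEMP_INT_LIMITS(_min, _max)	0x39, _max, 0x3A, _min

static const unsigned char demo_regs[] = {
	LM87_TEMP_INT_LIMITS(10, 60),	/* expands to 0x39, 60, 0x3A, 10 */
	0				/* terminator, as in the board tables */
};

int main(void)
{
	const unsigned char *p = demo_regs;

	while (*p) {			/* same walk as efx_init_lm87() */
		unsigned char reg = *p++;
		unsigned char value = *p++;

		printf("write reg 0x%02x = %u\n", (unsigned)reg, (unsigned)value);
	}
	return 0;
}
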
+
+static void efx_fini_lm87(struct efx_nic *efx)
+{
+	i2c_unregister_device(falcon_board(efx)->hwmon_client);
+}
+
+static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+	struct i2c_client *client = falcon_board(efx)->hwmon_client;
+	s32 alarms1, alarms2;
+
+	/* If link is up then do not monitor temperature */
+	if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
+		return 0;
+
+	alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+	alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+	if (alarms1 < 0)
+		return alarms1;
+	if (alarms2 < 0)
+		return alarms2;
+	alarms1 &= mask;
+	alarms2 &= mask >> 8;
+	if (alarms1 || alarms2) {
+		EFX_ERR(efx,
+			"LM87 detected a hardware failure (status %02x:%02x)"
+			"%s%s\n",
+			alarms1, alarms2,
+			(alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
+			(alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
+		return -ERANGE;
+	}
+
+	return 0;
+}
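
A small illustration of how the 16-bit alarm mask is split across the two alarm registers read above: the low byte gates ALARMS1 and the high byte gates ALARMS2. The alarm values below are invented for the demo; the per-board masks appear later in this file.

/* Illustrative sketch only; alarm values are made up. */
#include <stdio.h>

#define LM87_ALARM_TEMP_EXT1	0x20

int main(void)
{
	unsigned mask = ~LM87_ALARM_TEMP_EXT1;	 /* ignore external temp alarm */
	unsigned alarms1 = LM87_ALARM_TEMP_EXT1; /* pretend only that bit fired */
	unsigned alarms2 = 0x00;

	alarms1 &= mask;	/* low byte of the mask */
	alarms2 &= mask >> 8;	/* high byte of the mask */

	/* Prints: after masking: 00:00 -> ok */
	printf("after masking: %02x:%02x -> %s\n", alarms1, alarms2,
	       (alarms1 || alarms2) ? "fault" : "ok");
	return 0;
}
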
+
+#else /* !CONFIG_SENSORS_LM87 */
+
+static inline int
+efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+	      const u8 *reg_values)
+{
+	return 0;
+}
+static inline void efx_fini_lm87(struct efx_nic *efx)
+{
+}
+static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_SENSORS_LM87 */
+
+/*****************************************************************************
+ * Support for the SFE4001 and SFN4111T NICs.
+ *
+ * The SFE4001 does not power-up fully at reset due to its high power
+ * consumption.  We control its power via a PCA9539 I/O expander.
+ * Both boards have a MAX6647 temperature monitor which we expose to
+ * the lm90 driver.
+ *
+ * This also provides minimal support for reflashing the PHY, which is
+ * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
+ * On SFE4001 rev A2 and later this is connected to the 3V3X output of
+ * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
+ * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
+ * exclusive with the network device being open.
+ */
+
+/**************************************************************************
+ * Support for I2C IO Expander device on SFE4001
+ */
+#define	PCA9539 0x74
+
+#define	P0_IN 0x00
+#define	P0_OUT 0x02
+#define	P0_INVERT 0x04
+#define	P0_CONFIG 0x06
+
+#define	P0_EN_1V0X_LBN 0
+#define	P0_EN_1V0X_WIDTH 1
+#define	P0_EN_1V2_LBN 1
+#define	P0_EN_1V2_WIDTH 1
+#define	P0_EN_2V5_LBN 2
+#define	P0_EN_2V5_WIDTH 1
+#define	P0_EN_3V3X_LBN 3
+#define	P0_EN_3V3X_WIDTH 1
+#define	P0_EN_5V_LBN 4
+#define	P0_EN_5V_WIDTH 1
+#define	P0_SHORTEN_JTAG_LBN 5
+#define	P0_SHORTEN_JTAG_WIDTH 1
+#define	P0_X_TRST_LBN 6
+#define	P0_X_TRST_WIDTH 1
+#define	P0_DSP_RESET_LBN 7
+#define	P0_DSP_RESET_WIDTH 1
+
+#define	P1_IN 0x01
+#define	P1_OUT 0x03
+#define	P1_INVERT 0x05
+#define	P1_CONFIG 0x07
+
+#define	P1_AFE_PWD_LBN 0
+#define	P1_AFE_PWD_WIDTH 1
+#define	P1_DSP_PWD25_LBN 1
+#define	P1_DSP_PWD25_WIDTH 1
+#define	P1_RESERVED_LBN 2
+#define	P1_RESERVED_WIDTH 2
+#define	P1_SPARE_LBN 4
+#define	P1_SPARE_WIDTH 4
+
+/* Temperature Sensor */
+#define MAX664X_REG_RSL		0x02
+#define MAX664X_REG_WLHO	0x0B
+
+static void sfe4001_poweroff(struct efx_nic *efx)
+{
+	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
+	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
+
+	/* Turn off all power rails and disable outputs */
+	i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
+	i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
+	i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
+
+	/* Clear any over-temperature alert */
+	i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+}
+
+static int sfe4001_poweron(struct efx_nic *efx)
+{
+	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
+	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
+	unsigned int i, j;
+	int rc;
+	u8 out;
+
+	/* Clear any previous over-temperature alert */
+	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+	if (rc < 0)
+		return rc;
+
+	/* Enable port 0 and port 1 outputs on IO expander */
+	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+	if (rc)
+		return rc;
+	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
+				       0xff & ~(1 << P1_SPARE_LBN));
+	if (rc)
+		goto fail_on;
+
+	/* If PHY power is on, turn it all off and wait 1 second to
+	 * ensure a full reset.
+	 */
+	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
+	if (rc < 0)
+		goto fail_on;
+	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+		       (0 << P0_EN_1V0X_LBN));
+	if (rc != out) {
+		EFX_INFO(efx, "power-cycling PHY\n");
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+		schedule_timeout_uninterruptible(HZ);
+	}
+
+	for (i = 0; i < 20; ++i) {
+		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+			       (1 << P0_X_TRST_LBN));
+		if (efx->phy_mode & PHY_MODE_SPECIAL)
+			out |= 1 << P0_EN_3V3X_LBN;
+
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+		msleep(10);
+
+		/* Turn on 1V power rail */
+		out &= ~(1 << P0_EN_1V0X_LBN);
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+
+		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+
+		/* In flash config mode, DSP does not turn on AFE, so
+		 * just wait 1 second.
+		 */
+		if (efx->phy_mode & PHY_MODE_SPECIAL) {
+			schedule_timeout_uninterruptible(HZ);
+			return 0;
+		}
+
+		for (j = 0; j < 10; ++j) {
+			msleep(100);
+
+			/* Check DSP has asserted AFE power line */
+			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
+			if (rc < 0)
+				goto fail_on;
+			if (rc & (1 << P1_AFE_PWD_LBN))
+				return 0;
+		}
+	}
+
+	EFX_INFO(efx, "timed out waiting for DSP boot\n");
+	rc = -ETIMEDOUT;
+fail_on:
+	sfe4001_poweroff(efx);
+	return rc;
+}
+
+static int sfn4111t_reset(struct efx_nic *efx)
+{
+	struct falcon_board *board = falcon_board(efx);
+	efx_oword_t reg;
+
+	/* GPIO 3 and the GPIO register are shared with I2C, so block that */
+	i2c_lock_adapter(&board->i2c_adap);
+
+	/* Pull RST_N (GPIO 2) low then let it up again, setting the
+	 * FLASH_CFG_1 strap (GPIO 3) appropriately.  Only change the
+	 * output enables; the output levels should always be 0 (low)
+	 * and we rely on external pull-ups. */
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+	msleep(1000);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
+			    !!(efx->phy_mode & PHY_MODE_SPECIAL));
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+	msleep(1);
+
+	i2c_unlock_adapter(&board->i2c_adap);
+
+	ssleep(1);
+	return 0;
+}
+
+static ssize_t show_phy_flash_cfg(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
+}
+
+static ssize_t set_phy_flash_cfg(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	enum efx_phy_mode old_mode, new_mode;
+	int err;
+
+	rtnl_lock();
+	old_mode = efx->phy_mode;
+	if (count == 0 || *buf == '0')
+		new_mode = old_mode & ~PHY_MODE_SPECIAL;
+	else
+		new_mode = PHY_MODE_SPECIAL;
+	if (old_mode == new_mode) {
+		err = 0;
+	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+		err = -EBUSY;
+	} else {
+		/* Reset the PHY, reconfigure the MAC and enable/disable
+		 * MAC stats accordingly. */
+		efx->phy_mode = new_mode;
+		if (new_mode & PHY_MODE_SPECIAL)
+			falcon_stop_nic_stats(efx);
+		if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001)
+			err = sfe4001_poweron(efx);
+		else
+			err = sfn4111t_reset(efx);
+		if (!err)
+			err = efx_reconfigure_port(efx);
+		if (!(new_mode & PHY_MODE_SPECIAL))
+			falcon_start_nic_stats(efx);
+	}
+	rtnl_unlock();
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
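
The attribute registered above appears under the adapter's PCI device directory in sysfs. Below is a hedged user-space sketch of selecting reflash mode; the PCI address in the path is a placeholder, and per set_phy_flash_cfg() above the write is refused with EBUSY while the interface is up.

/* Illustrative sketch only; the sysfs path uses a placeholder PCI
 * address, not a real device. */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/pci/devices/0000:01:00.0/phy_flash_cfg";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* '0' selects normal mode; anything else selects PHY_MODE_SPECIAL. */
	fputc('1', f);
	return fclose(f) ? 1 : 0;
}
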
+
+static void sfe4001_fini(struct efx_nic *efx)
+{
+	struct falcon_board *board = falcon_board(efx);
+
+	EFX_INFO(efx, "%s\n", __func__);
+
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	sfe4001_poweroff(efx);
+	i2c_unregister_device(board->ioexp_client);
+	i2c_unregister_device(board->hwmon_client);
+}
+
+static int sfe4001_check_hw(struct efx_nic *efx)
+{
+	s32 status;
+
+	/* If XAUI link is up then do not monitor */
+	if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
+		return 0;
+
+	/* Check the powered status of the PHY. Lack of power implies that
+	 * the MAX6647 has shut down power to it, probably due to a temp.
+	 * alarm. We read the power status rather than the MAX6647 status
+	 * directly because the latter is read-to-clear and would thus
+	 * start to power up the PHY again when polled, causing us to blip
+	 * the power undesirably.
+	 * We know we can read from the IO expander because we did
+	 * it during power-on. Assume failure now is bad news. */
+	status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
+	if (status >= 0 &&
+	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
+		return 0;
+
+	/* Use board power control, not PHY power control */
+	sfe4001_poweroff(efx);
+	efx->phy_mode = PHY_MODE_OFF;
+
+	return (status < 0) ? -EIO : -ERANGE;
+}
+
+static struct i2c_board_info sfe4001_hwmon_info = {
+	I2C_BOARD_INFO("max6647", 0x4e),
+};
+
+/* This board uses an I2C expander to provide power to the PHY, which needs to
+ * be turned on before the PHY can be used.
+ * Context: Process context, rtnl lock held
+ */
+static int sfe4001_init(struct efx_nic *efx)
+{
+	struct falcon_board *board = falcon_board(efx);
+	int rc;
+
+#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+	board->hwmon_client =
+		i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
+#else
+	board->hwmon_client =
+		i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
+#endif
+	if (!board->hwmon_client)
+		return -EIO;
+
+	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
+	rc = i2c_smbus_write_byte_data(board->hwmon_client,
+				       MAX664X_REG_WLHO, 90);
+	if (rc)
+		goto fail_hwmon;
+
+	board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
+	if (!board->ioexp_client) {
+		rc = -EIO;
+		goto fail_hwmon;
+	}
+
+	if (efx->phy_mode & PHY_MODE_SPECIAL) {
+		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+		 * will fail. */
+		falcon_stop_nic_stats(efx);
+	}
+	rc = sfe4001_poweron(efx);
+	if (rc)
+		goto fail_ioexp;
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	if (rc)
+		goto fail_on;
+
+	EFX_INFO(efx, "PHY is powered on\n");
+	return 0;
+
+fail_on:
+	sfe4001_poweroff(efx);
+fail_ioexp:
+	i2c_unregister_device(board->ioexp_client);
+fail_hwmon:
+	i2c_unregister_device(board->hwmon_client);
+	return rc;
+}
+
+static int sfn4111t_check_hw(struct efx_nic *efx)
+{
+	s32 status;
+
+	/* If XAUI link is up then do not monitor */
+	if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
+		return 0;
+
+	/* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
+	status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
+					  MAX664X_REG_RSL);
+	if (status < 0)
+		return -EIO;
+	if (status & 0x57)
+		return -ERANGE;
+	return 0;
+}
+
+static void sfn4111t_fini(struct efx_nic *efx)
+{
+	EFX_INFO(efx, "%s\n", __func__);
+
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	i2c_unregister_device(falcon_board(efx)->hwmon_client);
+}
+
+static struct i2c_board_info sfn4111t_a0_hwmon_info = {
+	I2C_BOARD_INFO("max6647", 0x4e),
+};
+
+static struct i2c_board_info sfn4111t_r5_hwmon_info = {
+	I2C_BOARD_INFO("max6646", 0x4d),
+};
+
+static void sfn4111t_init_phy(struct efx_nic *efx)
+{
+	if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
+		if (sft9001_wait_boot(efx) != -EINVAL)
+			return;
+
+		efx->phy_mode = PHY_MODE_SPECIAL;
+		falcon_stop_nic_stats(efx);
+	}
+
+	sfn4111t_reset(efx);
+	sft9001_wait_boot(efx);
+}
+
+static int sfn4111t_init(struct efx_nic *efx)
+{
+	struct falcon_board *board = falcon_board(efx);
+	int rc;
+
+	board->hwmon_client =
+		i2c_new_device(&board->i2c_adap,
+			       (board->minor < 5) ?
+			       &sfn4111t_a0_hwmon_info :
+			       &sfn4111t_r5_hwmon_info);
+	if (!board->hwmon_client)
+		return -EIO;
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	if (rc)
+		goto fail_hwmon;
+
+	if (efx->phy_mode & PHY_MODE_SPECIAL)
+		/* PHY may not generate a 156.25 MHz clock and MAC
+		 * stats fetch will fail. */
+		falcon_stop_nic_stats(efx);
+
+	return 0;
+
+fail_hwmon:
+	i2c_unregister_device(board->hwmon_client);
+	return rc;
+}
+
+/*****************************************************************************
+ * Support for the SFE4002
+ *
+ */
+static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4002_lm87_regs[] = {
+	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
+	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
+	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
+	LM87_IN_LIMITS(3, 0xb0, 0xc9),		/* 5V:    4.6-5.2V */
+	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
+	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
+	LM87_AIN_LIMITS(0, 0xa0, 0xb2),		/* AIN1:  1.66V +/- 5% */
+	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
+	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
+	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
+	0
+};
+
+static struct i2c_board_info sfe4002_hwmon_info = {
+	I2C_BOARD_INFO("lm87", 0x2e),
+	.platform_data	= &sfe4002_lm87_channel,
+};
+
+/****************************************************************************/
+/* LED allocations. Note that on rev A0 boards the schematic and the reality
+ * differ: red and green are swapped. Below is the fixed (A1) layout (there
+ * are only 3 A0 boards in existence, so no real reason to make this
+ * conditional).
+ */
+#define SFE4002_FAULT_LED (2)	/* Red */
+#define SFE4002_RX_LED    (0)	/* Green */
+#define SFE4002_TX_LED    (1)	/* Amber */
+
+static void sfe4002_init_phy(struct efx_nic *efx)
+{
+	/* Set the TX and RX LEDs to reflect status and activity, and the
+	 * fault LED off */
+	falcon_qt202x_set_led(efx, SFE4002_TX_LED,
+			      QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
+	falcon_qt202x_set_led(efx, SFE4002_RX_LED,
+			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
+	falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
+}
+
+static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+	falcon_qt202x_set_led(
+		efx, SFE4002_FAULT_LED,
+		(mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
+}
+
+static int sfe4002_check_hw(struct efx_nic *efx)
+{
+	struct falcon_board *board = falcon_board(efx);
+
+	/* Rev. A0 SFE4002 boards report a temperature fault the whole time
+	 * (bad sensor), so we mask it out. */
+	unsigned alarm_mask =
+		(board->major == 0 && board->minor == 0) ?
+		~LM87_ALARM_TEMP_EXT1 : ~0;
+
+	return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4002_init(struct efx_nic *efx)
+{
+	return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
+}
+
+/*****************************************************************************
+ * Support for the SFN4112F
+ *
+ */
+static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfn4112f_lm87_regs[] = {
+	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
+	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
+	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
+	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
+	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
+	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
+	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
+	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
+	0
+};
+
+static struct i2c_board_info sfn4112f_hwmon_info = {
+	I2C_BOARD_INFO("lm87", 0x2e),
+	.platform_data	= &sfn4112f_lm87_channel,
+};
+
+#define SFN4112F_ACT_LED	0
+#define SFN4112F_LINK_LED	1
+
+static void sfn4112f_init_phy(struct efx_nic *efx)
+{
+	falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
+			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
+	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
+			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
+}
+
+static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+	int reg;
+
+	switch (mode) {
+	case EFX_LED_OFF:
+		reg = QUAKE_LED_OFF;
+		break;
+	case EFX_LED_ON:
+		reg = QUAKE_LED_ON;
+		break;
+	default:
+		reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
+		break;
+	}
+
+	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
+}
+
+static int sfn4112f_check_hw(struct efx_nic *efx)
+{
+	/* Mask out unused sensors */
+	return efx_check_lm87(efx, ~0x48);
+}
+
+static int sfn4112f_init(struct efx_nic *efx)
+{
+	return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
+}
+
+static const struct falcon_board_type board_types[] = {
+	{
+		.id		= FALCON_BOARD_SFE4001,
+		.ref_model	= "SFE4001",
+		.gen_type	= "10GBASE-T adapter",
+		.init		= sfe4001_init,
+		.init_phy	= efx_port_dummy_op_void,
+		.fini		= sfe4001_fini,
+		.set_id_led	= tenxpress_set_id_led,
+		.monitor	= sfe4001_check_hw,
+	},
+	{
+		.id		= FALCON_BOARD_SFE4002,
+		.ref_model	= "SFE4002",
+		.gen_type	= "XFP adapter",
+		.init		= sfe4002_init,
+		.init_phy	= sfe4002_init_phy,
+		.fini		= efx_fini_lm87,
+		.set_id_led	= sfe4002_set_id_led,
+		.monitor	= sfe4002_check_hw,
+	},
+	{
+		.id		= FALCON_BOARD_SFN4111T,
+		.ref_model	= "SFN4111T",
+		.gen_type	= "100/1000/10GBASE-T adapter",
+		.init		= sfn4111t_init,
+		.init_phy	= sfn4111t_init_phy,
+		.fini		= sfn4111t_fini,
+		.set_id_led	= tenxpress_set_id_led,
+		.monitor	= sfn4111t_check_hw,
+	},
+	{
+		.id		= FALCON_BOARD_SFN4112F,
+		.ref_model	= "SFN4112F",
+		.gen_type	= "SFP+ adapter",
+		.init		= sfn4112f_init,
+		.init_phy	= sfn4112f_init_phy,
+		.fini		= efx_fini_lm87,
+		.set_id_led	= sfn4112f_set_id_led,
+		.monitor	= sfn4112f_check_hw,
+	},
+};
+
+static const struct falcon_board_type falcon_dummy_board = {
+	.init		= efx_port_dummy_op_int,
+	.init_phy	= efx_port_dummy_op_void,
+	.fini		= efx_port_dummy_op_void,
+	.set_id_led	= efx_port_dummy_op_set_id_led,
+	.monitor	= efx_port_dummy_op_int,
+};
+
+void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+{
+	struct falcon_board *board = falcon_board(efx);
+	u8 type_id = FALCON_BOARD_TYPE(revision_info);
+	int i;
+
+	board->major = FALCON_BOARD_MAJOR(revision_info);
+	board->minor = FALCON_BOARD_MINOR(revision_info);
+
+	for (i = 0; i < ARRAY_SIZE(board_types); i++)
+		if (board_types[i].id == type_id)
+			board->type = &board_types[i];
+
+	if (board->type) {
+		EFX_INFO(efx, "board is %s rev %c%d\n",
+			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
+			 ? board->type->ref_model : board->type->gen_type,
+			 'A' + board->major, board->minor);
+	} else {
+		EFX_ERR(efx, "unknown board type %d\n", type_id);
+		board->type = &falcon_dummy_board;
+	}
+}
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
index 8865eae20ac5..7dadfcbd6ce7 100644
--- a/drivers/net/sfc/falcon_gmac.c
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -11,11 +11,10 @@
 #include <linux/delay.h>
 #include "net_driver.h"
 #include "efx.h"
-#include "falcon.h"
+#include "nic.h"
 #include "mac.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
-#include "gmii.h"
+#include "regs.h"
+#include "io.h"
 
 /**************************************************************************
  *
@@ -23,106 +22,109 @@
  *
  *************************************************************************/
 
-static void falcon_reconfigure_gmac(struct efx_nic *efx)
+static int falcon_reconfigure_gmac(struct efx_nic *efx)
 {
+	struct efx_link_state *link_state = &efx->link_state;
 	bool loopback, tx_fc, rx_fc, bytemode;
 	int if_mode;
 	unsigned int max_frame_len;
 	efx_oword_t reg;
 
 	/* Configuration register 1 */
-	tx_fc = (efx->link_fc & EFX_FC_TX) || !efx->link_fd;
-	rx_fc = !!(efx->link_fc & EFX_FC_RX);
+	tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
+	rx_fc = !!(link_state->fc & EFX_FC_RX);
 	loopback = (efx->loopback_mode == LOOPBACK_GMAC);
-	bytemode = (efx->link_speed == 1000);
+	bytemode = (link_state->speed == 1000);
 
 	EFX_POPULATE_OWORD_5(reg,
-			     GM_LOOP, loopback,
-			     GM_TX_EN, 1,
-			     GM_TX_FC_EN, tx_fc,
-			     GM_RX_EN, 1,
-			     GM_RX_FC_EN, rx_fc);
-	falcon_write(efx, &reg, GM_CFG1_REG);
+			     FRF_AB_GM_LOOP, loopback,
+			     FRF_AB_GM_TX_EN, 1,
+			     FRF_AB_GM_TX_FC_EN, tx_fc,
+			     FRF_AB_GM_RX_EN, 1,
+			     FRF_AB_GM_RX_FC_EN, rx_fc);
+	efx_writeo(efx, &reg, FR_AB_GM_CFG1);
 	udelay(10);
 
 	/* Configuration register 2 */
 	if_mode = (bytemode) ? 2 : 1;
 	EFX_POPULATE_OWORD_5(reg,
-			     GM_IF_MODE, if_mode,
-			     GM_PAD_CRC_EN, 1,
-			     GM_LEN_CHK, 1,
-			     GM_FD, efx->link_fd,
-			     GM_PAMBL_LEN, 0x7/*datasheet recommended */);
+			     FRF_AB_GM_IF_MODE, if_mode,
+			     FRF_AB_GM_PAD_CRC_EN, 1,
+			     FRF_AB_GM_LEN_CHK, 1,
+			     FRF_AB_GM_FD, link_state->fd,
+			     FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
 
-	falcon_write(efx, &reg, GM_CFG2_REG);
+	efx_writeo(efx, &reg, FR_AB_GM_CFG2);
 	udelay(10);
 
 	/* Max frame len register */
 	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
-	EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len);
-	falcon_write(efx, &reg, GM_MAX_FLEN_REG);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
+	efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
 	udelay(10);
 
 	/* FIFO configuration register 0 */
 	EFX_POPULATE_OWORD_5(reg,
-			     GMF_FTFENREQ, 1,
-			     GMF_STFENREQ, 1,
-			     GMF_FRFENREQ, 1,
-			     GMF_SRFENREQ, 1,
-			     GMF_WTMENREQ, 1);
-	falcon_write(efx, &reg, GMF_CFG0_REG);
+			     FRF_AB_GMF_FTFENREQ, 1,
+			     FRF_AB_GMF_STFENREQ, 1,
+			     FRF_AB_GMF_FRFENREQ, 1,
+			     FRF_AB_GMF_SRFENREQ, 1,
+			     FRF_AB_GMF_WTMENREQ, 1);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
 	udelay(10);
 
 	/* FIFO configuration register 1 */
 	EFX_POPULATE_OWORD_2(reg,
-			     GMF_CFGFRTH, 0x12,
-			     GMF_CFGXOFFRTX, 0xffff);
-	falcon_write(efx, &reg, GMF_CFG1_REG);
+			     FRF_AB_GMF_CFGFRTH, 0x12,
+			     FRF_AB_GMF_CFGXOFFRTX, 0xffff);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
 	udelay(10);
 
 	/* FIFO configuration register 2 */
 	EFX_POPULATE_OWORD_2(reg,
-			     GMF_CFGHWM, 0x3f,
-			     GMF_CFGLWM, 0xa);
-	falcon_write(efx, &reg, GMF_CFG2_REG);
+			     FRF_AB_GMF_CFGHWM, 0x3f,
+			     FRF_AB_GMF_CFGLWM, 0xa);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
 	udelay(10);
 
 	/* FIFO configuration register 3 */
 	EFX_POPULATE_OWORD_2(reg,
-			     GMF_CFGHWMFT, 0x1c,
-			     GMF_CFGFTTH, 0x08);
-	falcon_write(efx, &reg, GMF_CFG3_REG);
+			     FRF_AB_GMF_CFGHWMFT, 0x1c,
+			     FRF_AB_GMF_CFGFTTH, 0x08);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
 	udelay(10);
 
 	/* FIFO configuration register 4 */
-	EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1);
-	falcon_write(efx, &reg, GMF_CFG4_REG);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
 	udelay(10);
 
 	/* FIFO configuration register 5 */
-	falcon_read(efx, &reg, GMF_CFG5_REG);
-	EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode);
-	EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd);
-	EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd);
-	EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0);
-	falcon_write(efx, &reg, GMF_CFG5_REG);
+	efx_reado(efx, &reg, FR_AB_GMF_CFG5);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
 	udelay(10);
 
 	/* MAC address */
 	EFX_POPULATE_OWORD_4(reg,
-			     GM_HWADDR_5, efx->net_dev->dev_addr[5],
-			     GM_HWADDR_4, efx->net_dev->dev_addr[4],
-			     GM_HWADDR_3, efx->net_dev->dev_addr[3],
-			     GM_HWADDR_2, efx->net_dev->dev_addr[2]);
-	falcon_write(efx, &reg, GM_ADR1_REG);
+			     FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
+			     FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
+			     FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
+			     FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
+	efx_writeo(efx, &reg, FR_AB_GM_ADR1);
 	udelay(10);
 	EFX_POPULATE_OWORD_2(reg,
-			     GM_HWADDR_1, efx->net_dev->dev_addr[1],
-			     GM_HWADDR_0, efx->net_dev->dev_addr[0]);
-	falcon_write(efx, &reg, GM_ADR2_REG);
+			     FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
+			     FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
+	efx_writeo(efx, &reg, FR_AB_GM_ADR2);
 	udelay(10);
 
 	falcon_reconfigure_mac_wrapper(efx);
+
+	return 0;
 }
 
 static void falcon_update_stats_gmac(struct efx_nic *efx)
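
For reference, the station-address writes in the hunk above spread the six dev_addr bytes across the two address registers in reverse order. The stand-alone sketch below only prints which byte lands in which field; the MAC address is invented.

/* Illustrative sketch only; the MAC address is made up. */
#include <stdio.h>

int main(void)
{
	const unsigned char dev_addr[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };

	/* FR_AB_GM_ADR1: FRF_AB_GM_ADR_B0..B3 = dev_addr[5..2] */
	printf("GM_ADR1 fields: %02x %02x %02x %02x\n",
	       dev_addr[5], dev_addr[4], dev_addr[3], dev_addr[2]);
	/* FR_AB_GM_ADR2: FRF_AB_GM_ADR_B4..B5 = dev_addr[1..0] */
	printf("GM_ADR2 fields: %02x %02x\n", dev_addr[1], dev_addr[0]);
	return 0;
}
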
@@ -130,11 +132,6 @@ static void falcon_update_stats_gmac(struct efx_nic *efx)
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	unsigned long old_rx_pause, old_tx_pause;
 	unsigned long new_rx_pause, new_tx_pause;
-	int rc;
-
-	rc = falcon_dma_stats(efx, GDmaDone_offset);
-	if (rc)
-		return;
 
 	/* Pause frames are erroneously counted as errors (SFC bug 3269) */
 	old_rx_pause = mac_stats->rx_pause;
@@ -221,9 +218,13 @@ static void falcon_update_stats_gmac(struct efx_nic *efx)
 	mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
 }
 
+static bool falcon_gmac_check_fault(struct efx_nic *efx)
+{
+	return false;
+}
+
 struct efx_mac_operations falcon_gmac_operations = {
 	.reconfigure	= falcon_reconfigure_gmac,
 	.update_stats	= falcon_update_stats_gmac,
-	.irq		= efx_port_dummy_op_void,
-	.poll		= efx_port_dummy_op_void,
+	.check_fault	= falcon_gmac_check_fault,
 };
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
deleted file mode 100644
index 2d2261117ace..000000000000
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ /dev/null
@@ -1,1333 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_FALCON_HWDEFS_H
-#define EFX_FALCON_HWDEFS_H
-
-/*
- * Falcon hardware value definitions.
- * Falcon is the internal codename for the SFC4000 controller that is
- * present in SFE400X evaluation boards
- */
-
-/**************************************************************************
- *
- * Falcon registers
- *
- **************************************************************************
- */
-
-/* Address region register */
-#define ADR_REGION_REG_KER	0x00
-#define ADR_REGION0_LBN	0
-#define ADR_REGION0_WIDTH	18
-#define ADR_REGION1_LBN	32
-#define ADR_REGION1_WIDTH	18
-#define ADR_REGION2_LBN	64
-#define ADR_REGION2_WIDTH	18
-#define ADR_REGION3_LBN	96
-#define ADR_REGION3_WIDTH	18
-
-/* Interrupt enable register */
-#define INT_EN_REG_KER 0x0010
-#define KER_INT_KER_LBN 3
-#define KER_INT_KER_WIDTH 1
-#define DRV_INT_EN_KER_LBN 0
-#define DRV_INT_EN_KER_WIDTH 1
-
-/* Interrupt status address register */
-#define INT_ADR_REG_KER	0x0030
-#define NORM_INT_VEC_DIS_KER_LBN 64
-#define NORM_INT_VEC_DIS_KER_WIDTH 1
-#define INT_ADR_KER_LBN 0
-#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
-
-/* Interrupt status register (B0 only) */
-#define INT_ISR0_B0 0x90
-#define INT_ISR1_B0 0xA0
-
-/* Interrupt acknowledge register (A0/A1 only) */
-#define INT_ACK_REG_KER_A1 0x0050
-#define INT_ACK_DUMMY_DATA_LBN 0
-#define INT_ACK_DUMMY_DATA_WIDTH 32
-
-/* Interrupt acknowledge work-around register (A0/A1 only )*/
-#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
-
-/* SPI host command register */
-#define EE_SPI_HCMD_REG_KER 0x0100
-#define EE_SPI_HCMD_CMD_EN_LBN 31
-#define EE_SPI_HCMD_CMD_EN_WIDTH 1
-#define EE_WR_TIMER_ACTIVE_LBN 28
-#define EE_WR_TIMER_ACTIVE_WIDTH 1
-#define EE_SPI_HCMD_SF_SEL_LBN 24
-#define EE_SPI_HCMD_SF_SEL_WIDTH 1
-#define EE_SPI_EEPROM 0
-#define EE_SPI_FLASH 1
-#define EE_SPI_HCMD_DABCNT_LBN 16
-#define EE_SPI_HCMD_DABCNT_WIDTH 5
-#define EE_SPI_HCMD_READ_LBN 15
-#define EE_SPI_HCMD_READ_WIDTH 1
-#define EE_SPI_READ 1
-#define EE_SPI_WRITE 0
-#define EE_SPI_HCMD_DUBCNT_LBN 12
-#define EE_SPI_HCMD_DUBCNT_WIDTH 2
-#define EE_SPI_HCMD_ADBCNT_LBN 8
-#define EE_SPI_HCMD_ADBCNT_WIDTH 2
-#define EE_SPI_HCMD_ENC_LBN 0
-#define EE_SPI_HCMD_ENC_WIDTH 8
-
-/* SPI host address register */
-#define EE_SPI_HADR_REG_KER 0x0110
-#define EE_SPI_HADR_ADR_LBN 0
-#define EE_SPI_HADR_ADR_WIDTH 24
-
-/* SPI host data register */
-#define EE_SPI_HDATA_REG_KER 0x0120
-
-/* SPI/VPD config register */
-#define EE_VPD_CFG_REG_KER 0x0140
-#define EE_VPD_EN_LBN 0
-#define EE_VPD_EN_WIDTH 1
-#define EE_VPD_EN_AD9_MODE_LBN 1
-#define EE_VPD_EN_AD9_MODE_WIDTH 1
-#define EE_EE_CLOCK_DIV_LBN 112
-#define EE_EE_CLOCK_DIV_WIDTH 7
-#define EE_SF_CLOCK_DIV_LBN 120
-#define EE_SF_CLOCK_DIV_WIDTH 7
-
-/* PCIE CORE ACCESS REG */
-#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
-#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
-#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
-#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
-
-/* NIC status register */
-#define NIC_STAT_REG 0x0200
-#define EE_STRAP_EN_LBN 31
-#define EE_STRAP_EN_WIDTH 1
-#define EE_STRAP_OVR_LBN 24
-#define EE_STRAP_OVR_WIDTH 4
-#define ONCHIP_SRAM_LBN 16
-#define ONCHIP_SRAM_WIDTH 1
-#define SF_PRST_LBN 9
-#define SF_PRST_WIDTH 1
-#define EE_PRST_LBN 8
-#define EE_PRST_WIDTH 1
-#define STRAP_PINS_LBN 0
-#define STRAP_PINS_WIDTH 3
-/* These bit definitions are extrapolated from the list of numerical
- * values for STRAP_PINS.
- */
-#define STRAP_10G_LBN 2
-#define STRAP_10G_WIDTH 1
-#define STRAP_PCIE_LBN 0
-#define STRAP_PCIE_WIDTH 1
-
-#define BOOTED_USING_NVDEVICE_LBN 3
-#define BOOTED_USING_NVDEVICE_WIDTH 1
-
-/* GPIO control register */
-#define GPIO_CTL_REG_KER 0x0210
-#define GPIO_USE_NIC_CLK_LBN (30)
-#define GPIO_USE_NIC_CLK_WIDTH (1)
-#define GPIO_OUTPUTS_LBN   (16)
-#define GPIO_OUTPUTS_WIDTH (4)
-#define GPIO_INPUTS_LBN (8)
-#define GPIO_DIRECTION_LBN (24)
-#define GPIO_DIRECTION_WIDTH (4)
-#define GPIO_DIRECTION_OUT (1)
-#define GPIO_SRAM_SLEEP (1 << 1)
-
-#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
-#define	GPIO3_OEN_WIDTH 1
-#define	GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
-#define	GPIO2_OEN_WIDTH 1
-#define	GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
-#define	GPIO1_OEN_WIDTH 1
-#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
-#define	GPIO0_OEN_WIDTH 1
-
-#define	GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
-#define	GPIO3_OUT_WIDTH 1
-#define	GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
-#define	GPIO2_OUT_WIDTH 1
-#define	GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
-#define	GPIO1_OUT_WIDTH 1
-#define	GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
-#define	GPIO0_OUT_WIDTH 1
-
-#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
-#define	GPIO3_IN_WIDTH 1
-#define	GPIO2_IN_WIDTH 1
-#define	GPIO1_IN_WIDTH 1
-#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
-#define	GPIO0_IN_WIDTH 1
-
-/* Global control register */
-#define GLB_CTL_REG_KER	0x0220
-#define EXT_PHY_RST_CTL_LBN 63
-#define EXT_PHY_RST_CTL_WIDTH 1
-#define PCIE_SD_RST_CTL_LBN 61
-#define PCIE_SD_RST_CTL_WIDTH 1
-
-#define PCIE_NSTCK_RST_CTL_LBN 58
-#define PCIE_NSTCK_RST_CTL_WIDTH 1
-#define PCIE_CORE_RST_CTL_LBN 57
-#define PCIE_CORE_RST_CTL_WIDTH 1
-#define EE_RST_CTL_LBN 49
-#define EE_RST_CTL_WIDTH 1
-#define RST_XGRX_LBN 24
-#define RST_XGRX_WIDTH 1
-#define RST_XGTX_LBN 23
-#define RST_XGTX_WIDTH 1
-#define RST_EM_LBN 22
-#define RST_EM_WIDTH 1
-#define EXT_PHY_RST_DUR_LBN 1
-#define EXT_PHY_RST_DUR_WIDTH 3
-#define SWRST_LBN 0
-#define SWRST_WIDTH 1
-#define INCLUDE_IN_RESET 0
-#define EXCLUDE_FROM_RESET 1
-
-/* Fatal interrupt register */
-#define FATAL_INTR_REG_KER 0x0230
-#define RBUF_OWN_INT_KER_EN_LBN 39
-#define RBUF_OWN_INT_KER_EN_WIDTH 1
-#define TBUF_OWN_INT_KER_EN_LBN 38
-#define TBUF_OWN_INT_KER_EN_WIDTH 1
-#define ILL_ADR_INT_KER_EN_LBN 33
-#define ILL_ADR_INT_KER_EN_WIDTH 1
-#define MEM_PERR_INT_KER_LBN 8
-#define MEM_PERR_INT_KER_WIDTH 1
-#define INT_KER_ERROR_LBN 0
-#define INT_KER_ERROR_WIDTH 12
-
-#define DP_CTRL_REG 0x250
-#define FLS_EVQ_ID_LBN 0
-#define FLS_EVQ_ID_WIDTH 11
-
-#define MEM_STAT_REG_KER 0x260
-
-/* Debug probe register */
-#define DEBUG_BLK_SEL_MISC 7
-#define DEBUG_BLK_SEL_SERDES 6
-#define DEBUG_BLK_SEL_EM 5
-#define DEBUG_BLK_SEL_SR 4
-#define DEBUG_BLK_SEL_EV 3
-#define DEBUG_BLK_SEL_RX 2
-#define DEBUG_BLK_SEL_TX 1
-#define DEBUG_BLK_SEL_BIU 0
-
-/* FPGA build version */
-#define ALTERA_BUILD_REG_KER 0x0300
-#define VER_ALL_LBN 0
-#define VER_ALL_WIDTH 32
-
-/* Spare EEPROM bits register (flash 0x390) */
-#define SPARE_REG_KER 0x310
-#define MEM_PERR_EN_TX_DATA_LBN 72
-#define MEM_PERR_EN_TX_DATA_WIDTH 2
-
-/* Timer table for kernel access */
-#define TIMER_CMD_REG_KER 0x420
-#define TIMER_MODE_LBN 12
-#define TIMER_MODE_WIDTH 2
-#define TIMER_MODE_DIS 0
-#define TIMER_MODE_INT_HLDOFF 2
-#define TIMER_VAL_LBN 0
-#define TIMER_VAL_WIDTH 12
-
-/* Driver generated event register */
-#define DRV_EV_REG_KER 0x440
-#define DRV_EV_QID_LBN 64
-#define DRV_EV_QID_WIDTH 12
-#define DRV_EV_DATA_LBN 0
-#define DRV_EV_DATA_WIDTH 64
-
-/* Buffer table configuration register */
-#define BUF_TBL_CFG_REG_KER 0x600
-#define BUF_TBL_MODE_LBN 3
-#define BUF_TBL_MODE_WIDTH 1
-#define BUF_TBL_MODE_HALF 0
-#define BUF_TBL_MODE_FULL 1
-
-/* SRAM receive descriptor cache configuration register */
-#define SRM_RX_DC_CFG_REG_KER 0x610
-#define SRM_RX_DC_BASE_ADR_LBN 0
-#define SRM_RX_DC_BASE_ADR_WIDTH 21
-
-/* SRAM transmit descriptor cache configuration register */
-#define SRM_TX_DC_CFG_REG_KER 0x620
-#define SRM_TX_DC_BASE_ADR_LBN 0
-#define SRM_TX_DC_BASE_ADR_WIDTH 21
-
-/* SRAM configuration register */
-#define SRM_CFG_REG_KER 0x630
-#define SRAM_OOB_BT_INIT_EN_LBN 3
-#define SRAM_OOB_BT_INIT_EN_WIDTH 1
-#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
-#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
-#define SRM_NB_BSZ_1BANKS_2M 0
-#define SRM_NB_BSZ_1BANKS_4M 1
-#define SRM_NB_BSZ_1BANKS_8M 2
-#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
-#define SRM_NB_BSZ_2BANKS_4M 4
-#define SRM_NB_BSZ_2BANKS_8M 5
-#define SRM_NB_BSZ_2BANKS_16M 6
-#define SRM_NB_BSZ_RESERVED 7
-
-/* Special buffer table update register */
-#define BUF_TBL_UPD_REG_KER 0x0650
-#define BUF_UPD_CMD_LBN 63
-#define BUF_UPD_CMD_WIDTH 1
-#define BUF_CLR_CMD_LBN 62
-#define BUF_CLR_CMD_WIDTH 1
-#define BUF_CLR_END_ID_LBN 32
-#define BUF_CLR_END_ID_WIDTH 20
-#define BUF_CLR_START_ID_LBN 0
-#define BUF_CLR_START_ID_WIDTH 20
-
-/* Receive configuration register */
-#define RX_CFG_REG_KER 0x800
-
-/* B0 */
-#define RX_INGR_EN_B0_LBN 47
-#define RX_INGR_EN_B0_WIDTH 1
-#define RX_DESC_PUSH_EN_B0_LBN 43
-#define RX_DESC_PUSH_EN_B0_WIDTH 1
-#define RX_XON_TX_TH_B0_LBN 33
-#define RX_XON_TX_TH_B0_WIDTH 5
-#define RX_XOFF_TX_TH_B0_LBN 28
-#define RX_XOFF_TX_TH_B0_WIDTH 5
-#define RX_USR_BUF_SIZE_B0_LBN 19
-#define RX_USR_BUF_SIZE_B0_WIDTH 9
-#define RX_XON_MAC_TH_B0_LBN 10
-#define RX_XON_MAC_TH_B0_WIDTH 9
-#define RX_XOFF_MAC_TH_B0_LBN 1
-#define RX_XOFF_MAC_TH_B0_WIDTH 9
-#define RX_XOFF_MAC_EN_B0_LBN 0
-#define RX_XOFF_MAC_EN_B0_WIDTH 1
-
-/* A1 */
-#define RX_DESC_PUSH_EN_A1_LBN 35
-#define RX_DESC_PUSH_EN_A1_WIDTH 1
-#define RX_XON_TX_TH_A1_LBN 25
-#define RX_XON_TX_TH_A1_WIDTH 5
-#define RX_XOFF_TX_TH_A1_LBN 20
-#define RX_XOFF_TX_TH_A1_WIDTH 5
-#define RX_USR_BUF_SIZE_A1_LBN 11
-#define RX_USR_BUF_SIZE_A1_WIDTH 9
-#define RX_XON_MAC_TH_A1_LBN 6
-#define RX_XON_MAC_TH_A1_WIDTH 5
-#define RX_XOFF_MAC_TH_A1_LBN 1
-#define RX_XOFF_MAC_TH_A1_WIDTH 5
-#define RX_XOFF_MAC_EN_A1_LBN 0
-#define RX_XOFF_MAC_EN_A1_WIDTH 1
-
-/* Receive filter control register */
-#define RX_FILTER_CTL_REG 0x810
-#define UDP_FULL_SRCH_LIMIT_LBN 32
-#define UDP_FULL_SRCH_LIMIT_WIDTH 8
-#define NUM_KER_LBN 24
-#define NUM_KER_WIDTH 2
-#define UDP_WILD_SRCH_LIMIT_LBN 16
-#define UDP_WILD_SRCH_LIMIT_WIDTH 8
-#define TCP_WILD_SRCH_LIMIT_LBN 8
-#define TCP_WILD_SRCH_LIMIT_WIDTH 8
-#define TCP_FULL_SRCH_LIMIT_LBN 0
-#define TCP_FULL_SRCH_LIMIT_WIDTH 8
-
-/* RX queue flush register */
-#define RX_FLUSH_DESCQ_REG_KER 0x0820
-#define RX_FLUSH_DESCQ_CMD_LBN 24
-#define RX_FLUSH_DESCQ_CMD_WIDTH 1
-#define RX_FLUSH_DESCQ_LBN 0
-#define RX_FLUSH_DESCQ_WIDTH 12
-
-/* Receive descriptor update register */
-#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
-#define RX_DESC_WPTR_DWORD_LBN 0
-#define RX_DESC_WPTR_DWORD_WIDTH 12
-
-/* Receive descriptor cache configuration register */
-#define RX_DC_CFG_REG_KER 0x840
-#define RX_DC_SIZE_LBN 0
-#define RX_DC_SIZE_WIDTH 2
-
-#define RX_DC_PF_WM_REG_KER 0x850
-#define RX_DC_PF_LWM_LBN 0
-#define RX_DC_PF_LWM_WIDTH 6
-
-/* RX no descriptor drop counter */
-#define RX_NODESC_DROP_REG_KER 0x880
-#define RX_NODESC_DROP_CNT_LBN 0
-#define RX_NODESC_DROP_CNT_WIDTH 16
-
-/* RX black magic register */
-#define RX_SELF_RST_REG_KER 0x890
-#define RX_ISCSI_DIS_LBN 17
-#define RX_ISCSI_DIS_WIDTH 1
-#define RX_NODESC_WAIT_DIS_LBN 9
-#define RX_NODESC_WAIT_DIS_WIDTH 1
-#define RX_RECOVERY_EN_LBN 8
-#define RX_RECOVERY_EN_WIDTH 1
-
-/* TX queue flush register */
-#define TX_FLUSH_DESCQ_REG_KER 0x0a00
-#define TX_FLUSH_DESCQ_CMD_LBN 12
-#define TX_FLUSH_DESCQ_CMD_WIDTH 1
-#define TX_FLUSH_DESCQ_LBN 0
-#define TX_FLUSH_DESCQ_WIDTH 12
-
-/* Transmit descriptor update register */
-#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
-#define TX_DESC_WPTR_DWORD_LBN 0
-#define TX_DESC_WPTR_DWORD_WIDTH 12
-
-/* Transmit descriptor cache configuration register */
-#define TX_DC_CFG_REG_KER 0xa20
-#define TX_DC_SIZE_LBN 0
-#define TX_DC_SIZE_WIDTH 2
-
-/* Transmit checksum configuration register (A0/A1 only) */
-#define TX_CHKSM_CFG_REG_KER_A1 0xa30
-
-/* Transmit configuration register */
-#define TX_CFG_REG_KER 0xa50
-#define TX_NO_EOP_DISC_EN_LBN 5
-#define TX_NO_EOP_DISC_EN_WIDTH 1
-
-/* Transmit configuration register 2 */
-#define TX_CFG2_REG_KER 0xa80
-#define TX_CSR_PUSH_EN_LBN 89
-#define TX_CSR_PUSH_EN_WIDTH 1
-#define TX_RX_SPACER_LBN 64
-#define TX_RX_SPACER_WIDTH 8
-#define TX_SW_EV_EN_LBN 59
-#define TX_SW_EV_EN_WIDTH 1
-#define TX_RX_SPACER_EN_LBN 57
-#define TX_RX_SPACER_EN_WIDTH 1
-#define TX_PREF_THRESHOLD_LBN 19
-#define TX_PREF_THRESHOLD_WIDTH 2
-#define TX_ONE_PKT_PER_Q_LBN 18
-#define TX_ONE_PKT_PER_Q_WIDTH 1
-#define TX_DIS_NON_IP_EV_LBN 17
-#define TX_DIS_NON_IP_EV_WIDTH 1
-#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
-#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
-
-/* PHY management transmit data register */
-#define MD_TXD_REG_KER 0xc00
-#define MD_TXD_LBN 0
-#define MD_TXD_WIDTH 16
-
-/* PHY management receive data register */
-#define MD_RXD_REG_KER 0xc10
-#define MD_RXD_LBN 0
-#define MD_RXD_WIDTH 16
-
-/* PHY management configuration & status register */
-#define MD_CS_REG_KER 0xc20
-#define MD_GC_LBN 4
-#define MD_GC_WIDTH 1
-#define MD_RIC_LBN 2
-#define MD_RIC_WIDTH 1
-#define MD_RDC_LBN 1
-#define MD_RDC_WIDTH 1
-#define MD_WRC_LBN 0
-#define MD_WRC_WIDTH 1
-
-/* PHY management PHY address register */
-#define MD_PHY_ADR_REG_KER 0xc30
-#define MD_PHY_ADR_LBN 0
-#define MD_PHY_ADR_WIDTH 16
-
-/* PHY management ID register */
-#define MD_ID_REG_KER 0xc40
-#define MD_PRT_ADR_LBN 11
-#define MD_PRT_ADR_WIDTH 5
-#define MD_DEV_ADR_LBN 6
-#define MD_DEV_ADR_WIDTH 5
-
-/* PHY management status & mask register (DWORD read only) */
-#define MD_STAT_REG_KER 0xc50
-#define MD_BSERR_LBN 2
-#define MD_BSERR_WIDTH 1
-#define MD_LNFL_LBN 1
-#define MD_LNFL_WIDTH 1
-#define MD_BSY_LBN 0
-#define MD_BSY_WIDTH 1
-
-/* Port 0 and 1 MAC stats registers */
-#define MAC0_STAT_DMA_REG_KER 0xc60
-#define MAC_STAT_DMA_CMD_LBN 48
-#define MAC_STAT_DMA_CMD_WIDTH 1
-#define MAC_STAT_DMA_ADR_LBN 0
-#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
-
-/* Port 0 and 1 MAC control registers */
-#define MAC0_CTRL_REG_KER 0xc80
-#define MAC_XOFF_VAL_LBN 16
-#define MAC_XOFF_VAL_WIDTH 16
-#define TXFIFO_DRAIN_EN_B0_LBN 7
-#define TXFIFO_DRAIN_EN_B0_WIDTH 1
-#define MAC_BCAD_ACPT_LBN 4
-#define MAC_BCAD_ACPT_WIDTH 1
-#define MAC_UC_PROM_LBN 3
-#define MAC_UC_PROM_WIDTH 1
-#define MAC_LINK_STATUS_LBN 2
-#define MAC_LINK_STATUS_WIDTH 1
-#define MAC_SPEED_LBN 0
-#define MAC_SPEED_WIDTH 2
-
-/* 10G XAUI XGXS default values */
-#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
-#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
-#define XX_SD_CTL_DRV_DEFAULT 0  /* 20mA */
-
-/* Multicast address hash table */
-#define MAC_MCAST_HASH_REG0_KER 0xca0
-#define MAC_MCAST_HASH_REG1_KER 0xcb0
-
-/* GMAC configuration register 1 */
-#define GM_CFG1_REG 0xe00
-#define GM_SW_RST_LBN 31
-#define GM_SW_RST_WIDTH 1
-#define GM_LOOP_LBN 8
-#define GM_LOOP_WIDTH 1
-#define GM_RX_FC_EN_LBN 5
-#define GM_RX_FC_EN_WIDTH 1
-#define GM_TX_FC_EN_LBN 4
-#define GM_TX_FC_EN_WIDTH 1
-#define GM_RX_EN_LBN 2
-#define GM_RX_EN_WIDTH 1
-#define GM_TX_EN_LBN 0
-#define GM_TX_EN_WIDTH 1
-
-/* GMAC configuration register 2 */
-#define GM_CFG2_REG 0xe10
-#define GM_PAMBL_LEN_LBN 12
-#define GM_PAMBL_LEN_WIDTH 4
-#define GM_IF_MODE_LBN 8
-#define GM_IF_MODE_WIDTH 2
-#define GM_LEN_CHK_LBN 4
-#define GM_LEN_CHK_WIDTH 1
-#define GM_PAD_CRC_EN_LBN 2
-#define GM_PAD_CRC_EN_WIDTH 1
-#define GM_FD_LBN 0
-#define GM_FD_WIDTH 1
-
-/* GMAC maximum frame length register */
-#define GM_MAX_FLEN_REG 0xe40
-#define GM_MAX_FLEN_LBN 0
-#define GM_MAX_FLEN_WIDTH 16
-
-/* GMAC station address register 1 */
-#define GM_ADR1_REG 0xf00
-#define GM_HWADDR_5_LBN 24
-#define GM_HWADDR_5_WIDTH 8
-#define GM_HWADDR_4_LBN 16
-#define GM_HWADDR_4_WIDTH 8
-#define GM_HWADDR_3_LBN 8
-#define GM_HWADDR_3_WIDTH 8
-#define GM_HWADDR_2_LBN 0
-#define GM_HWADDR_2_WIDTH 8
-
-/* GMAC station address register 2 */
-#define GM_ADR2_REG 0xf10
-#define GM_HWADDR_1_LBN 24
-#define GM_HWADDR_1_WIDTH 8
-#define GM_HWADDR_0_LBN 16
-#define GM_HWADDR_0_WIDTH 8
-
-/* GMAC FIFO configuration register 0 */
-#define GMF_CFG0_REG 0xf20
-#define GMF_FTFENREQ_LBN 12
-#define GMF_FTFENREQ_WIDTH 1
-#define GMF_STFENREQ_LBN 11
-#define GMF_STFENREQ_WIDTH 1
-#define GMF_FRFENREQ_LBN 10
-#define GMF_FRFENREQ_WIDTH 1
-#define GMF_SRFENREQ_LBN 9
-#define GMF_SRFENREQ_WIDTH 1
-#define GMF_WTMENREQ_LBN 8
-#define GMF_WTMENREQ_WIDTH 1
-
-/* GMAC FIFO configuration register 1 */
-#define GMF_CFG1_REG 0xf30
-#define GMF_CFGFRTH_LBN 16
-#define GMF_CFGFRTH_WIDTH 5
-#define GMF_CFGXOFFRTX_LBN 0
-#define GMF_CFGXOFFRTX_WIDTH 16
-
-/* GMAC FIFO configuration register 2 */
-#define GMF_CFG2_REG 0xf40
-#define GMF_CFGHWM_LBN 16
-#define GMF_CFGHWM_WIDTH 6
-#define GMF_CFGLWM_LBN 0
-#define GMF_CFGLWM_WIDTH 6
-
-/* GMAC FIFO configuration register 3 */
-#define GMF_CFG3_REG 0xf50
-#define GMF_CFGHWMFT_LBN 16
-#define GMF_CFGHWMFT_WIDTH 6
-#define GMF_CFGFTTH_LBN 0
-#define GMF_CFGFTTH_WIDTH 6
-
-/* GMAC FIFO configuration register 4 */
-#define GMF_CFG4_REG 0xf60
-#define GMF_HSTFLTRFRM_PAUSE_LBN 12
-#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
-
-/* GMAC FIFO configuration register 5 */
-#define GMF_CFG5_REG 0xf70
-#define GMF_CFGHDPLX_LBN 22
-#define GMF_CFGHDPLX_WIDTH 1
-#define GMF_CFGBYTMODE_LBN 19
-#define GMF_CFGBYTMODE_WIDTH 1
-#define GMF_HSTDRPLT64_LBN 18
-#define GMF_HSTDRPLT64_WIDTH 1
-#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
-#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
-
-/* XGMAC address register low */
-#define XM_ADR_LO_REG 0x1200
-#define XM_ADR_3_LBN 24
-#define XM_ADR_3_WIDTH 8
-#define XM_ADR_2_LBN 16
-#define XM_ADR_2_WIDTH 8
-#define XM_ADR_1_LBN 8
-#define XM_ADR_1_WIDTH 8
-#define XM_ADR_0_LBN 0
-#define XM_ADR_0_WIDTH 8
-
-/* XGMAC address register high */
-#define XM_ADR_HI_REG 0x1210
-#define XM_ADR_5_LBN 8
-#define XM_ADR_5_WIDTH 8
-#define XM_ADR_4_LBN 0
-#define XM_ADR_4_WIDTH 8
-
-/* XGMAC global configuration */
-#define XM_GLB_CFG_REG 0x1220
-#define XM_RX_STAT_EN_LBN 11
-#define XM_RX_STAT_EN_WIDTH 1
-#define XM_TX_STAT_EN_LBN 10
-#define XM_TX_STAT_EN_WIDTH 1
-#define XM_RX_JUMBO_MODE_LBN 6
-#define XM_RX_JUMBO_MODE_WIDTH 1
-#define XM_INTCLR_MODE_LBN 3
-#define XM_INTCLR_MODE_WIDTH 1
-#define XM_CORE_RST_LBN 0
-#define XM_CORE_RST_WIDTH 1
-
-/* XGMAC transmit configuration */
-#define XM_TX_CFG_REG 0x1230
-#define XM_IPG_LBN 16
-#define XM_IPG_WIDTH 4
-#define XM_FCNTL_LBN 10
-#define XM_FCNTL_WIDTH 1
-#define XM_TXCRC_LBN 8
-#define XM_TXCRC_WIDTH 1
-#define XM_AUTO_PAD_LBN 5
-#define XM_AUTO_PAD_WIDTH 1
-#define XM_TX_PRMBL_LBN 2
-#define XM_TX_PRMBL_WIDTH 1
-#define XM_TXEN_LBN 1
-#define XM_TXEN_WIDTH 1
-
-/* XGMAC receive configuration */
-#define XM_RX_CFG_REG 0x1240
-#define XM_PASS_CRC_ERR_LBN 25
-#define XM_PASS_CRC_ERR_WIDTH 1
-#define XM_ACPT_ALL_MCAST_LBN 11
-#define XM_ACPT_ALL_MCAST_WIDTH 1
-#define XM_ACPT_ALL_UCAST_LBN 9
-#define XM_ACPT_ALL_UCAST_WIDTH 1
-#define XM_AUTO_DEPAD_LBN 8
-#define XM_AUTO_DEPAD_WIDTH 1
-#define XM_RXEN_LBN 1
-#define XM_RXEN_WIDTH 1
-
-/* XGMAC management interrupt mask register */
-#define XM_MGT_INT_MSK_REG_B0 0x1250
-#define XM_MSK_PRMBLE_ERR_LBN 2
-#define XM_MSK_PRMBLE_ERR_WIDTH 1
-#define XM_MSK_RMTFLT_LBN 1
-#define XM_MSK_RMTFLT_WIDTH 1
-#define XM_MSK_LCLFLT_LBN 0
-#define XM_MSK_LCLFLT_WIDTH 1
-
-/* XGMAC flow control register */
-#define XM_FC_REG 0x1270
-#define XM_PAUSE_TIME_LBN 16
-#define XM_PAUSE_TIME_WIDTH 16
-#define XM_DIS_FCNTL_LBN 0
-#define XM_DIS_FCNTL_WIDTH 1
-
-/* XGMAC pause time count register */
-#define XM_PAUSE_TIME_REG 0x1290
-
-/* XGMAC transmit parameter register */
-#define XM_TX_PARAM_REG 0x012d0
-#define XM_TX_JUMBO_MODE_LBN 31
-#define XM_TX_JUMBO_MODE_WIDTH 1
-#define XM_MAX_TX_FRM_SIZE_LBN 16
-#define XM_MAX_TX_FRM_SIZE_WIDTH 14
-
-/* XGMAC receive parameter register */
-#define XM_RX_PARAM_REG 0x12e0
-#define XM_MAX_RX_FRM_SIZE_LBN 0
-#define XM_MAX_RX_FRM_SIZE_WIDTH 14
-
-/* XGMAC management interrupt status register */
-#define XM_MGT_INT_REG_B0 0x12f0
-#define XM_PRMBLE_ERR 2
-#define XM_PRMBLE_WIDTH 1
-#define XM_RMTFLT_LBN 1
-#define XM_RMTFLT_WIDTH 1
-#define XM_LCLFLT_LBN 0
-#define XM_LCLFLT_WIDTH 1
-
-/* XGXS/XAUI powerdown/reset register */
-#define XX_PWR_RST_REG 0x1300
-
-#define XX_SD_RST_ACT_LBN 16
-#define XX_SD_RST_ACT_WIDTH 1
-#define XX_PWRDND_EN_LBN 15
-#define XX_PWRDND_EN_WIDTH 1
-#define XX_PWRDNC_EN_LBN 14
-#define XX_PWRDNC_EN_WIDTH 1
-#define XX_PWRDNB_EN_LBN 13
-#define XX_PWRDNB_EN_WIDTH 1
-#define XX_PWRDNA_EN_LBN 12
-#define XX_PWRDNA_EN_WIDTH 1
-#define XX_RSTPLLCD_EN_LBN 9
-#define XX_RSTPLLCD_EN_WIDTH 1
-#define XX_RSTPLLAB_EN_LBN 8
-#define XX_RSTPLLAB_EN_WIDTH 1
-#define XX_RESETD_EN_LBN 7
-#define XX_RESETD_EN_WIDTH 1
-#define XX_RESETC_EN_LBN 6
-#define XX_RESETC_EN_WIDTH 1
-#define XX_RESETB_EN_LBN 5
-#define XX_RESETB_EN_WIDTH 1
-#define XX_RESETA_EN_LBN 4
-#define XX_RESETA_EN_WIDTH 1
-#define XX_RSTXGXSRX_EN_LBN 2
-#define XX_RSTXGXSRX_EN_WIDTH 1
-#define XX_RSTXGXSTX_EN_LBN 1
-#define XX_RSTXGXSTX_EN_WIDTH 1
-#define XX_RST_XX_EN_LBN 0
-#define XX_RST_XX_EN_WIDTH 1
-
-/* XGXS/XAUI powerdown/reset control register */
-#define XX_SD_CTL_REG 0x1310
-#define XX_HIDRVD_LBN 15
-#define XX_HIDRVD_WIDTH 1
-#define XX_LODRVD_LBN 14
-#define XX_LODRVD_WIDTH 1
-#define XX_HIDRVC_LBN 13
-#define XX_HIDRVC_WIDTH 1
-#define XX_LODRVC_LBN 12
-#define XX_LODRVC_WIDTH 1
-#define XX_HIDRVB_LBN 11
-#define XX_HIDRVB_WIDTH 1
-#define XX_LODRVB_LBN 10
-#define XX_LODRVB_WIDTH 1
-#define XX_HIDRVA_LBN 9
-#define XX_HIDRVA_WIDTH 1
-#define XX_LODRVA_LBN 8
-#define XX_LODRVA_WIDTH 1
-#define XX_LPBKD_LBN 3
-#define XX_LPBKD_WIDTH 1
-#define XX_LPBKC_LBN 2
-#define XX_LPBKC_WIDTH 1
-#define XX_LPBKB_LBN 1
-#define XX_LPBKB_WIDTH 1
-#define XX_LPBKA_LBN 0
-#define XX_LPBKA_WIDTH 1
-
-#define XX_TXDRV_CTL_REG 0x1320
-#define XX_DEQD_LBN 28
-#define XX_DEQD_WIDTH 4
-#define XX_DEQC_LBN 24
-#define XX_DEQC_WIDTH 4
-#define XX_DEQB_LBN 20
-#define XX_DEQB_WIDTH 4
-#define XX_DEQA_LBN 16
-#define XX_DEQA_WIDTH 4
-#define XX_DTXD_LBN 12
-#define XX_DTXD_WIDTH 4
-#define XX_DTXC_LBN 8
-#define XX_DTXC_WIDTH 4
-#define XX_DTXB_LBN 4
-#define XX_DTXB_WIDTH 4
-#define XX_DTXA_LBN 0
-#define XX_DTXA_WIDTH 4
-
-/* XAUI XGXS core status register */
-#define XX_CORE_STAT_REG 0x1360
-#define XX_FORCE_SIG_LBN 24
-#define XX_FORCE_SIG_WIDTH 8
-#define XX_FORCE_SIG_DECODE_FORCED 0xff
-#define XX_XGXS_LB_EN_LBN 23
-#define XX_XGXS_LB_EN_WIDTH 1
-#define XX_XGMII_LB_EN_LBN 22
-#define XX_XGMII_LB_EN_WIDTH 1
-#define XX_ALIGN_DONE_LBN 20
-#define XX_ALIGN_DONE_WIDTH 1
-#define XX_SYNC_STAT_LBN 16
-#define XX_SYNC_STAT_WIDTH 4
-#define XX_SYNC_STAT_DECODE_SYNCED 0xf
-#define XX_COMMA_DET_LBN 12
-#define XX_COMMA_DET_WIDTH 4
-#define XX_COMMA_DET_DECODE_DETECTED 0xf
-#define XX_COMMA_DET_RESET 0xf
-#define XX_CHARERR_LBN 4
-#define XX_CHARERR_WIDTH 4
-#define XX_CHARERR_RESET 0xf
-#define XX_DISPERR_LBN 0
-#define XX_DISPERR_WIDTH 4
-#define XX_DISPERR_RESET 0xf
-
-/* Receive filter table */
-#define RX_FILTER_TBL0 0xF00000
-
-/* Receive descriptor pointer table */
-#define RX_DESC_PTR_TBL_KER_A1 0x11800
-#define RX_DESC_PTR_TBL_KER_B0 0xF40000
-#define RX_DESC_PTR_TBL_KER_P0 0x900
-#define RX_ISCSI_DDIG_EN_LBN 88
-#define RX_ISCSI_DDIG_EN_WIDTH 1
-#define RX_ISCSI_HDIG_EN_LBN 87
-#define RX_ISCSI_HDIG_EN_WIDTH 1
-#define RX_DESCQ_BUF_BASE_ID_LBN 36
-#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
-#define RX_DESCQ_EVQ_ID_LBN 24
-#define RX_DESCQ_EVQ_ID_WIDTH 12
-#define RX_DESCQ_OWNER_ID_LBN 10
-#define RX_DESCQ_OWNER_ID_WIDTH 14
-#define RX_DESCQ_LABEL_LBN 5
-#define RX_DESCQ_LABEL_WIDTH 5
-#define RX_DESCQ_SIZE_LBN 3
-#define RX_DESCQ_SIZE_WIDTH 2
-#define RX_DESCQ_SIZE_4K 3
-#define RX_DESCQ_SIZE_2K 2
-#define RX_DESCQ_SIZE_1K 1
-#define RX_DESCQ_SIZE_512 0
-#define RX_DESCQ_TYPE_LBN 2
-#define RX_DESCQ_TYPE_WIDTH 1
-#define RX_DESCQ_JUMBO_LBN 1
-#define RX_DESCQ_JUMBO_WIDTH 1
-#define RX_DESCQ_EN_LBN 0
-#define RX_DESCQ_EN_WIDTH 1
-
-/* Transmit descriptor pointer table */
-#define TX_DESC_PTR_TBL_KER_A1 0x11900
-#define TX_DESC_PTR_TBL_KER_B0 0xF50000
-#define TX_DESC_PTR_TBL_KER_P0 0xa40
-#define TX_NON_IP_DROP_DIS_B0_LBN 91
-#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
-#define TX_IP_CHKSM_DIS_B0_LBN 90
-#define TX_IP_CHKSM_DIS_B0_WIDTH 1
-#define TX_TCP_CHKSM_DIS_B0_LBN 89
-#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
-#define TX_DESCQ_EN_LBN 88
-#define TX_DESCQ_EN_WIDTH 1
-#define TX_ISCSI_DDIG_EN_LBN 87
-#define TX_ISCSI_DDIG_EN_WIDTH 1
-#define TX_ISCSI_HDIG_EN_LBN 86
-#define TX_ISCSI_HDIG_EN_WIDTH 1
-#define TX_DESCQ_BUF_BASE_ID_LBN 36
-#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
-#define TX_DESCQ_EVQ_ID_LBN 24
-#define TX_DESCQ_EVQ_ID_WIDTH 12
-#define TX_DESCQ_OWNER_ID_LBN 10
-#define TX_DESCQ_OWNER_ID_WIDTH 14
-#define TX_DESCQ_LABEL_LBN 5
-#define TX_DESCQ_LABEL_WIDTH 5
-#define TX_DESCQ_SIZE_LBN 3
-#define TX_DESCQ_SIZE_WIDTH 2
-#define TX_DESCQ_SIZE_4K 3
-#define TX_DESCQ_SIZE_2K 2
-#define TX_DESCQ_SIZE_1K 1
-#define TX_DESCQ_SIZE_512 0
-#define TX_DESCQ_TYPE_LBN 1
-#define TX_DESCQ_TYPE_WIDTH 2
-
-/* Event queue pointer */
-#define EVQ_PTR_TBL_KER_A1 0x11a00
-#define EVQ_PTR_TBL_KER_B0 0xf60000
-#define EVQ_PTR_TBL_KER_P0 0x500
-#define EVQ_EN_LBN 23
-#define EVQ_EN_WIDTH 1
-#define EVQ_SIZE_LBN 20
-#define EVQ_SIZE_WIDTH 3
-#define EVQ_SIZE_32K 6
-#define EVQ_SIZE_16K 5
-#define EVQ_SIZE_8K 4
-#define EVQ_SIZE_4K 3
-#define EVQ_SIZE_2K 2
-#define EVQ_SIZE_1K 1
-#define EVQ_SIZE_512 0
-#define EVQ_BUF_BASE_ID_LBN 0
-#define EVQ_BUF_BASE_ID_WIDTH 20
-
-/* Event queue read pointer */
-#define EVQ_RPTR_REG_KER_A1 0x11b00
-#define EVQ_RPTR_REG_KER_B0 0xfa0000
-#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
-#define EVQ_RPTR_DWORD_LBN 0
-#define EVQ_RPTR_DWORD_WIDTH 14
-
-/* RSS indirection table */
-#define RX_RSS_INDIR_TBL_B0 0xFB0000
-#define RX_RSS_INDIR_ENT_B0_LBN 0
-#define RX_RSS_INDIR_ENT_B0_WIDTH 6
-
-/* Special buffer descriptors (full-mode) */
-#define BUF_FULL_TBL_KER_A1 0x8000
-#define BUF_FULL_TBL_KER_B0 0x800000
-#define IP_DAT_BUF_SIZE_LBN 50
-#define IP_DAT_BUF_SIZE_WIDTH 1
-#define IP_DAT_BUF_SIZE_8K 1
-#define IP_DAT_BUF_SIZE_4K 0
-#define BUF_ADR_REGION_LBN 48
-#define BUF_ADR_REGION_WIDTH 2
-#define BUF_ADR_FBUF_LBN 14
-#define BUF_ADR_FBUF_WIDTH 34
-#define BUF_OWNER_ID_FBUF_LBN 0
-#define BUF_OWNER_ID_FBUF_WIDTH 14
-
-/* Transmit descriptor */
-#define TX_KER_PORT_LBN 63
-#define TX_KER_PORT_WIDTH 1
-#define TX_KER_CONT_LBN 62
-#define TX_KER_CONT_WIDTH 1
-#define TX_KER_BYTE_CNT_LBN 48
-#define TX_KER_BYTE_CNT_WIDTH 14
-#define TX_KER_BUF_REGION_LBN 46
-#define TX_KER_BUF_REGION_WIDTH 2
-#define TX_KER_BUF_REGION0_DECODE 0
-#define TX_KER_BUF_REGION1_DECODE 1
-#define TX_KER_BUF_REGION2_DECODE 2
-#define TX_KER_BUF_REGION3_DECODE 3
-#define TX_KER_BUF_ADR_LBN 0
-#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
-
-/* Receive descriptor */
-#define RX_KER_BUF_SIZE_LBN 48
-#define RX_KER_BUF_SIZE_WIDTH 14
-#define RX_KER_BUF_REGION_LBN 46
-#define RX_KER_BUF_REGION_WIDTH 2
-#define RX_KER_BUF_REGION0_DECODE 0
-#define RX_KER_BUF_REGION1_DECODE 1
-#define RX_KER_BUF_REGION2_DECODE 2
-#define RX_KER_BUF_REGION3_DECODE 3
-#define RX_KER_BUF_ADR_LBN 0
-#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
-
-/**************************************************************************
- *
- * Falcon events
- *
- **************************************************************************
- */
-
-/* Event queue entries */
-#define EV_CODE_LBN 60
-#define EV_CODE_WIDTH 4
-#define RX_IP_EV_DECODE 0
-#define TX_IP_EV_DECODE 2
-#define DRIVER_EV_DECODE 5
-#define GLOBAL_EV_DECODE 6
-#define DRV_GEN_EV_DECODE 7
-#define WHOLE_EVENT_LBN 0
-#define WHOLE_EVENT_WIDTH 64
-
-/* Receive events */
-#define RX_EV_PKT_OK_LBN 56
-#define RX_EV_PKT_OK_WIDTH 1
-#define RX_EV_PAUSE_FRM_ERR_LBN 55
-#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
-#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
-#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
-#define RX_EV_IF_FRAG_ERR_LBN 53
-#define RX_EV_IF_FRAG_ERR_WIDTH 1
-#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
-#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
-#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
-#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
-#define RX_EV_ETH_CRC_ERR_LBN 50
-#define RX_EV_ETH_CRC_ERR_WIDTH 1
-#define RX_EV_FRM_TRUNC_LBN 49
-#define RX_EV_FRM_TRUNC_WIDTH 1
-#define RX_EV_DRIB_NIB_LBN 48
-#define RX_EV_DRIB_NIB_WIDTH 1
-#define RX_EV_TOBE_DISC_LBN 47
-#define RX_EV_TOBE_DISC_WIDTH 1
-#define RX_EV_PKT_TYPE_LBN 44
-#define RX_EV_PKT_TYPE_WIDTH 3
-#define RX_EV_PKT_TYPE_ETH_DECODE 0
-#define RX_EV_PKT_TYPE_LLC_DECODE 1
-#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
-#define RX_EV_PKT_TYPE_VLAN_DECODE 3
-#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
-#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
-#define RX_EV_HDR_TYPE_LBN 42
-#define RX_EV_HDR_TYPE_WIDTH 2
-#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
-#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
-#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
-#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
-#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
-	((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
-#define RX_EV_MCAST_HASH_MATCH_LBN 40
-#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
-#define RX_EV_MCAST_PKT_LBN 39
-#define RX_EV_MCAST_PKT_WIDTH 1
-#define RX_EV_Q_LABEL_LBN 32
-#define RX_EV_Q_LABEL_WIDTH 5
-#define RX_EV_JUMBO_CONT_LBN 31
-#define RX_EV_JUMBO_CONT_WIDTH 1
-#define RX_EV_BYTE_CNT_LBN 16
-#define RX_EV_BYTE_CNT_WIDTH 14
-#define RX_EV_SOP_LBN 15
-#define RX_EV_SOP_WIDTH 1
-#define RX_EV_DESC_PTR_LBN 0
-#define RX_EV_DESC_PTR_WIDTH 12
-
-/* Transmit events */
-#define TX_EV_PKT_ERR_LBN 38
-#define TX_EV_PKT_ERR_WIDTH 1
-#define TX_EV_Q_LABEL_LBN 32
-#define TX_EV_Q_LABEL_WIDTH 5
-#define TX_EV_WQ_FF_FULL_LBN 15
-#define TX_EV_WQ_FF_FULL_WIDTH 1
-#define TX_EV_COMP_LBN 12
-#define TX_EV_COMP_WIDTH 1
-#define TX_EV_DESC_PTR_LBN 0
-#define TX_EV_DESC_PTR_WIDTH 12
-
-/* Driver events */
-#define DRIVER_EV_SUB_CODE_LBN 56
-#define DRIVER_EV_SUB_CODE_WIDTH 4
-#define DRIVER_EV_SUB_DATA_LBN 0
-#define DRIVER_EV_SUB_DATA_WIDTH 14
-#define TX_DESCQ_FLS_DONE_EV_DECODE 0
-#define RX_DESCQ_FLS_DONE_EV_DECODE 1
-#define EVQ_INIT_DONE_EV_DECODE 2
-#define EVQ_NOT_EN_EV_DECODE 3
-#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
-#define SRM_UPD_DONE_EV_DECODE 5
-#define WAKE_UP_EV_DECODE 6
-#define TX_PKT_NON_TCP_UDP_DECODE 9
-#define TIMER_EV_DECODE 10
-#define RX_RECOVERY_EV_DECODE 11
-#define RX_DSC_ERROR_EV_DECODE 14
-#define TX_DSC_ERROR_EV_DECODE 15
-#define DRIVER_EV_TX_DESCQ_ID_LBN 0
-#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
-#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
-#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
-#define DRIVER_EV_RX_DESCQ_ID_LBN 0
-#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
-#define SRM_CLR_EV_DECODE 0
-#define SRM_UPD_EV_DECODE 1
-#define SRM_ILLCLR_EV_DECODE 2
-
-/* Global events */
-#define RX_RECOVERY_B0_LBN 12
-#define RX_RECOVERY_B0_WIDTH 1
-#define XG_MNT_INTR_B0_LBN 11
-#define XG_MNT_INTR_B0_WIDTH 1
-#define RX_RECOVERY_A1_LBN 11
-#define RX_RECOVERY_A1_WIDTH 1
-#define XFP_PHY_INTR_LBN 10
-#define XFP_PHY_INTR_WIDTH 1
-#define XG_PHY_INTR_LBN 9
-#define XG_PHY_INTR_WIDTH 1
-#define G_PHY1_INTR_LBN 8
-#define G_PHY1_INTR_WIDTH 1
-#define G_PHY0_INTR_LBN 7
-#define G_PHY0_INTR_WIDTH 1
-
-/* Driver-generated test events */
-#define EVQ_MAGIC_LBN 0
-#define EVQ_MAGIC_WIDTH 32
-
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
-#define XgTxPkts512to1023Octets_offset 0xB4
-#define XgTxPkts512to1023Octets_WIDTH 32
-#define XgTxPkts1024to15xxOctets_offset 0xB8
-#define XgTxPkts1024to15xxOctets_WIDTH 32
-#define XgTxPkts1519toMaxOctets_offset 0xBC
-#define XgTxPkts1519toMaxOctets_WIDTH 32
-#define XgTxUndersizePkts_offset 0xC0
-#define XgTxUndersizePkts_WIDTH 32
-#define XgTxOversizePkts_offset 0xC4
-#define XgTxOversizePkts_WIDTH 32
-#define XgTxNonTcpUdpPkt_offset 0xC8
-#define XgTxNonTcpUdpPkt_WIDTH 16
-#define XgTxMacSrcErrPkt_offset 0xCC
-#define XgTxMacSrcErrPkt_WIDTH 16
-#define XgTxIpSrcErrPkt_offset 0xD0
-#define XgTxIpSrcErrPkt_WIDTH 16
-#define XgDmaDone_offset 0xD4
-
-#define FALCON_STATS_NOT_DONE 0x00000000
-#define FALCON_STATS_DONE 0xffffffff
-
-/* Interrupt status register bits */
-#define FATAL_INT_LBN 64
-#define FATAL_INT_WIDTH 1
-#define INT_EVQS_LBN 40
-#define INT_EVQS_WIDTH 4
-
-/**************************************************************************
- *
- * Falcon non-volatile configuration
- *
- **************************************************************************
- */
-
-/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
-struct falcon_nvconfig_board_v2 {
-	__le16 nports;
-	u8 port0_phy_addr;
-	u8 port0_phy_type;
-	u8 port1_phy_addr;
-	u8 port1_phy_type;
-	__le16 asic_sub_revision;
-	__le16 board_revision;
-} __packed;
-
-/* Board configuration v3 extra information */
-struct falcon_nvconfig_board_v3 {
-	__le32 spi_device_type[2];
-} __packed;
-
-/* Bit numbers for spi_device_type */
-#define SPI_DEV_TYPE_SIZE_LBN 0
-#define SPI_DEV_TYPE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
-#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
-#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
-#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
-#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
-#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
-#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_FIELD(type, field)					\
-	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
-
-#define NVCONFIG_OFFSET 0x300
-
-#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
-struct falcon_nvconfig {
-	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
-	u8 mac_address[2][8];			/* 0x310 */
-	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
-	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
-	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
-	efx_oword_t hw_init_reg;			/* 0x350 */
-	efx_oword_t nic_stat_reg;			/* 0x360 */
-	efx_oword_t glb_ctl_reg;			/* 0x370 */
-	efx_oword_t srm_cfg_reg;			/* 0x380 */
-	efx_oword_t spare_reg;				/* 0x390 */
-	__le16 board_magic_num;			/* 0x3A0 */
-	__le16 board_struct_ver;
-	__le16 board_checksum;
-	struct falcon_nvconfig_board_v2 board_v2;
-	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
-	struct falcon_nvconfig_board_v3 board_v3;
-} __packed;
-
-#endif /* EFX_FALCON_HWDEFS_H */
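The register definitions removed above all follow the driver's LBN/WIDTH convention: <FIELD>_LBN is the lowest bit number of a field and <FIELD>_WIDTH its size in bits, consumed by macros such as SPI_DEV_TYPE_FIELD() via EFX_LOW_BIT()/EFX_WIDTH()/EFX_MASK32(). As a rough stand-alone sketch of that convention (illustration only, not part of this patch; the driver itself operates on 128-bit efx_oword_t images through EFX_OWORD_FIELD()/EFX_SET_OWORD_FIELD() rather than plain integers):

/* Stand-alone illustration of the <FIELD>_LBN / <FIELD>_WIDTH convention,
 * using the SPI device-type field defined in the header above. */
#include <stdint.h>

#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2

static inline uint32_t spi_dev_type_addr_len(uint32_t spi_device_type)
{
	/* Shift the field down to bit 0, then mask it to WIDTH bits;
	 * roughly equivalent to SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ADDR_LEN). */
	return (spi_device_type >> SPI_DEV_TYPE_ADDR_LEN_LBN) &
	       ((1u << SPI_DEV_TYPE_ADDR_LEN_WIDTH) - 1);
}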
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
deleted file mode 100644
index 8883092dae97..000000000000
--- a/drivers/net/sfc/falcon_io.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_FALCON_IO_H
-#define EFX_FALCON_IO_H
-
-#include <linux/io.h>
-#include <linux/spinlock.h>
-
-/**************************************************************************
- *
- * Falcon hardware access
- *
- **************************************************************************
- *
- * Notes on locking strategy:
- *
- * Most Falcon registers require 16-byte (or 8-byte, for SRAM
- * registers) atomic writes which necessitates locking.
- * Under normal operation few writes to the Falcon BAR are made and these
- * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
- * cased to allow 4-byte (hence lockless) accesses.
- *
- * It *is* safe to write to these 4-byte registers in the middle of an
- * access to an 8-byte or 16-byte register.  We therefore use a
- * spinlock to protect accesses to the larger registers, but no locks
- * for the 4-byte registers.
- *
- * A write barrier is needed to ensure that DW3 is written after DW0/1/2
- * due to the way the 16byte registers are "collected" in the Falcon BIU
- *
- * We also lock when carrying out reads, to ensure consistency of the
- * data (made possible since the BIU reads all 128 bits into a cache).
- * Reads are very rare, so this isn't a significant performance
- * impact.  (Most data transferred from NIC to host is DMAed directly
- * into host memory).
- *
- * I/O BAR access uses locks for both reads and writes (but is only provided
- * for testing purposes).
- */
-
-/* Special buffer descriptors (Falcon SRAM) */
-#define BUF_TBL_KER_A1 0x18000
-#define BUF_TBL_KER_B0 0x800000
-
-
-#if BITS_PER_LONG == 64
-#define FALCON_USE_QWORD_IO 1
-#endif
-
-#ifdef FALCON_USE_QWORD_IO
-static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
-				  unsigned int reg)
-{
-	__raw_writeq((__force u64)value, efx->membase + reg);
-}
-static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
-{
-	return (__force __le64)__raw_readq(efx->membase + reg);
-}
-#endif
-
-static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
-				  unsigned int reg)
-{
-	__raw_writel((__force u32)value, efx->membase + reg);
-}
-static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
-{
-	return (__force __le32)__raw_readl(efx->membase + reg);
-}
-
-/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
-static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
-				unsigned int reg)
-{
-	unsigned long flags;
-
-	EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
-		    EFX_OWORD_VAL(*value));
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef FALCON_USE_QWORD_IO
-	_falcon_writeq(efx, value->u64[0], reg + 0);
-	wmb();
-	_falcon_writeq(efx, value->u64[1], reg + 8);
-#else
-	_falcon_writel(efx, value->u32[0], reg + 0);
-	_falcon_writel(efx, value->u32[1], reg + 4);
-	_falcon_writel(efx, value->u32[2], reg + 8);
-	wmb();
-	_falcon_writel(efx, value->u32[3], reg + 12);
-#endif
-	mmiowb();
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
-/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
-static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
-				     unsigned int index)
-{
-	unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
-	unsigned long flags;
-
-	EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
-		    reg, EFX_QWORD_VAL(*value));
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef FALCON_USE_QWORD_IO
-	_falcon_writeq(efx, value->u64[0], reg + 0);
-#else
-	_falcon_writel(efx, value->u32[0], reg + 0);
-	wmb();
-	_falcon_writel(efx, value->u32[1], reg + 4);
-#endif
-	mmiowb();
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
-/* Write dword to Falcon register that allows partial writes
- *
- * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
- * TX_DESC_UPD_REG) can be written to as a single dword.  This allows
- * for lockless writes.
- */
-static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
-				 unsigned int reg)
-{
-	EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
-		    reg, EFX_DWORD_VAL(*value));
-
-	/* No lock required */
-	_falcon_writel(efx, value->u32[0], reg);
-}
-
-/* Read from a Falcon register
- *
- * This reads an entire 16-byte Falcon register in one go, locking as
- * appropriate.  It is essential to read the first dword first, as this
- * prompts Falcon to load the current value into the shadow register.
- */
-static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
-			       unsigned int reg)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-	value->u32[0] = _falcon_readl(efx, reg + 0);
-	rmb();
-	value->u32[1] = _falcon_readl(efx, reg + 4);
-	value->u32[2] = _falcon_readl(efx, reg + 8);
-	value->u32[3] = _falcon_readl(efx, reg + 12);
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-
-	EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
-		    EFX_OWORD_VAL(*value));
-}
-
-/* This reads an 8-byte Falcon SRAM entry in one go. */
-static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
-				    unsigned int index)
-{
-	unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
-	unsigned long flags;
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef FALCON_USE_QWORD_IO
-	value->u64[0] = _falcon_readq(efx, reg + 0);
-#else
-	value->u32[0] = _falcon_readl(efx, reg + 0);
-	rmb();
-	value->u32[1] = _falcon_readl(efx, reg + 4);
-#endif
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-
-	EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
-		    reg, EFX_QWORD_VAL(*value));
-}
-
-/* Read dword from Falcon register that allows partial writes (sic) */
-static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
-				unsigned int reg)
-{
-	value->u32[0] = _falcon_readl(efx, reg);
-	EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
-		    reg, EFX_DWORD_VAL(*value));
-}
-
-/* Write to a register forming part of a table */
-static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
-				      unsigned int reg, unsigned int index)
-{
-	falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Read to a register forming part of a table */
-static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
-				     unsigned int reg, unsigned int index)
-{
-	falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Write to a dword register forming part of a table */
-static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
-				       unsigned int reg, unsigned int index)
-{
-	falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Page-mapped register block size */
-#define FALCON_PAGE_BLOCK_SIZE 0x2000
-
-/* Calculate offset to page-mapped register block */
-#define FALCON_PAGED_REG(page, reg) \
-	((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
-
-/* As for falcon_write(), but for a page-mapped register. */
-static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
-				     unsigned int reg, unsigned int page)
-{
-	falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
-}
-
-/* As for falcon_writel(), but for a page-mapped register. */
-static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
-				      unsigned int reg, unsigned int page)
-{
-	falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
-}
-
-/* Write dword to Falcon page-mapped register with an extra lock.
- *
- * As for falcon_writel_page(), but for a register that suffers from
- * SFC bug 3181.  If writing to page 0, take out a lock so the BIU
- * collector cannot be confused.
- */
-static inline void falcon_writel_page_locked(struct efx_nic *efx,
-					     efx_dword_t *value,
-					     unsigned int reg,
-					     unsigned int page)
-{
-	unsigned long flags = 0;
-
-	if (page == 0)
-		spin_lock_irqsave(&efx->biu_lock, flags);
-	falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
-	if (page == 0)
-		spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
-#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bec52ca37eee..3da933f8f079 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -11,13 +11,12 @@
 #include <linux/delay.h>
 #include "net_driver.h"
 #include "efx.h"
-#include "falcon.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
 #include "mac.h"
 #include "mdio_10g.h"
 #include "phy.h"
-#include "boards.h"
 #include "workarounds.h"
 
 /**************************************************************************
@@ -36,43 +35,47 @@ static void falcon_setup_xaui(struct efx_nic *efx)
 	if (efx->phy_type == PHY_TYPE_NONE)
 		return;
 
-	falcon_read(efx, &sdctl, XX_SD_CTL_REG);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
-	falcon_write(efx, &sdctl, XX_SD_CTL_REG);
+	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
 
 	EFX_POPULATE_OWORD_8(txdrv,
-			     XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DTXD, XX_TXDRV_DTX_DEFAULT,
-			     XX_DTXC, XX_TXDRV_DTX_DEFAULT,
-			     XX_DTXB, XX_TXDRV_DTX_DEFAULT,
-			     XX_DTXA, XX_TXDRV_DTX_DEFAULT);
-	falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
+			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
 }
 
 int falcon_reset_xaui(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t reg;
 	int count;
 
+	/* Don't fetch MAC statistics over an XMAC reset */
+	WARN_ON(nic_data->stats_disable_count == 0);
+
 	/* Start reset sequence */
-	EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
-	falcon_write(efx, &reg, XX_PWR_RST_REG);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
 
 	/* Wait up to 10 ms for completion, then reinitialise */
 	for (count = 0; count < 1000; count++) {
-		falcon_read(efx, &reg, XX_PWR_RST_REG);
-		if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 &&
-		    EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) {
+		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
 			falcon_setup_xaui(efx);
 			return 0;
 		}
@@ -86,30 +89,30 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
 {
 	efx_oword_t reg;
 
-	if ((falcon_rev(efx) != FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
+	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
 		return;
 
 	/* We expect xgmii faults if the wireside link is up */
-	if (!EFX_WORKAROUND_5147(efx) || !efx->link_up)
+	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
 		return;
 
 	/* We can only use this interrupt to signal the negative edge of
 	 * xaui_align [we have to poll the positive edge]. */
-	if (!efx->mac_up)
+	if (efx->xmac_poll_required)
 		return;
 
 	/* Flush the ISR */
 	if (enable)
-		falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
+		efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
 
 	EFX_POPULATE_OWORD_2(reg,
-			     XM_MSK_RMTFLT, !enable,
-			     XM_MSK_LCLFLT, !enable);
-	falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
+			     FRF_AB_XM_MSK_RMTFLT, !enable,
+			     FRF_AB_XM_MSK_LCLFLT, !enable);
+	efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
 }
 
 /* Get status of XAUI link */
-bool falcon_xaui_link_ok(struct efx_nic *efx)
+static bool falcon_xaui_link_ok(struct efx_nic *efx)
 {
 	efx_oword_t reg;
 	bool align_done, link_ok = false;
@@ -119,84 +122,79 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
 		return true;
 
 	/* Read link status */
-	falcon_read(efx, &reg, XX_CORE_STAT_REG);
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
 
-	align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE);
-	sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT);
-	if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
+	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
 		link_ok = true;
 
 	/* Clear link status ready for next read */
-	EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
-	EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
-	EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
-	falcon_write(efx, &reg, XX_CORE_STAT_REG);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
 
 	/* If the link is up, then check the phy side of the xaui link */
-	if (efx->link_up && link_ok)
-		if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
+	if (efx->link_state.up && link_ok)
+		if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
 			link_ok = efx_mdio_phyxgxs_lane_sync(efx);
 
 	return link_ok;
 }
 
-static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+void falcon_reconfigure_xmac_core(struct efx_nic *efx)
 {
 	unsigned int max_frame_len;
 	efx_oword_t reg;
-	bool rx_fc = !!(efx->link_fc & EFX_FC_RX);
+	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
 
 	/* Configure MAC  - cut-thru mode is hard wired on */
-	EFX_POPULATE_DWORD_3(reg,
-			     XM_RX_JUMBO_MODE, 1,
-			     XM_TX_STAT_EN, 1,
-			     XM_RX_STAT_EN, 1);
-	falcon_write(efx, &reg, XM_GLB_CFG_REG);
+	EFX_POPULATE_OWORD_3(reg,
+			     FRF_AB_XM_RX_JUMBO_MODE, 1,
+			     FRF_AB_XM_TX_STAT_EN, 1,
+			     FRF_AB_XM_RX_STAT_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
 
 	/* Configure TX */
-	EFX_POPULATE_DWORD_6(reg,
-			     XM_TXEN, 1,
-			     XM_TX_PRMBL, 1,
-			     XM_AUTO_PAD, 1,
-			     XM_TXCRC, 1,
-			     XM_FCNTL, 1,
-			     XM_IPG, 0x3);
-	falcon_write(efx, &reg, XM_TX_CFG_REG);
+	EFX_POPULATE_OWORD_6(reg,
+			     FRF_AB_XM_TXEN, 1,
+			     FRF_AB_XM_TX_PRMBL, 1,
+			     FRF_AB_XM_AUTO_PAD, 1,
+			     FRF_AB_XM_TXCRC, 1,
+			     FRF_AB_XM_FCNTL, tx_fc,
+			     FRF_AB_XM_IPG, 0x3);
+	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
 
 	/* Configure RX */
-	EFX_POPULATE_DWORD_5(reg,
-			     XM_RXEN, 1,
-			     XM_AUTO_DEPAD, 0,
-			     XM_ACPT_ALL_MCAST, 1,
-			     XM_ACPT_ALL_UCAST, efx->promiscuous,
-			     XM_PASS_CRC_ERR, 1);
-	falcon_write(efx, &reg, XM_RX_CFG_REG);
+	EFX_POPULATE_OWORD_5(reg,
+			     FRF_AB_XM_RXEN, 1,
+			     FRF_AB_XM_AUTO_DEPAD, 0,
+			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
+			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
+			     FRF_AB_XM_PASS_CRC_ERR, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
 
 	/* Set frame length */
 	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
-	EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
-	falcon_write(efx, &reg, XM_RX_PARAM_REG);
-	EFX_POPULATE_DWORD_2(reg,
-			     XM_MAX_TX_FRM_SIZE, max_frame_len,
-			     XM_TX_JUMBO_MODE, 1);
-	falcon_write(efx, &reg, XM_TX_PARAM_REG);
-
-	EFX_POPULATE_DWORD_2(reg,
-			     XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
-			     XM_DIS_FCNTL, !rx_fc);
-	falcon_write(efx, &reg, XM_FC_REG);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+	EFX_POPULATE_OWORD_2(reg,
+			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+			     FRF_AB_XM_TX_JUMBO_MODE, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+	EFX_POPULATE_OWORD_2(reg,
+			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
+	efx_writeo(efx, &reg, FR_AB_XM_FC);
 
 	/* Set MAC address */
-	EFX_POPULATE_DWORD_4(reg,
-			     XM_ADR_0, efx->net_dev->dev_addr[0],
-			     XM_ADR_1, efx->net_dev->dev_addr[1],
-			     XM_ADR_2, efx->net_dev->dev_addr[2],
-			     XM_ADR_3, efx->net_dev->dev_addr[3]);
-	falcon_write(efx, &reg, XM_ADR_LO_REG);
-	EFX_POPULATE_DWORD_2(reg,
-			     XM_ADR_4, efx->net_dev->dev_addr[4],
-			     XM_ADR_5, efx->net_dev->dev_addr[5]);
-	falcon_write(efx, &reg, XM_ADR_HI_REG);
+	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
 }
 
 static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
@@ -212,12 +210,13 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
 		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
 		bool reset_xgxs;
 
-		falcon_read(efx, &reg, XX_CORE_STAT_REG);
-		old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN);
-		old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN);
+		efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+		old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+		old_xgmii_loopback =
+			EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
 
-		falcon_read(efx, &reg, XX_SD_CTL_REG);
-		old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA);
+		efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+		old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
 
 		/* The PHY driver may have turned XAUI off */
 		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
@@ -228,45 +227,55 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
 			falcon_reset_xaui(efx);
 	}
 
-	falcon_read(efx, &reg, XX_CORE_STAT_REG);
-	EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG,
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
 			    (xgxs_loopback || xaui_loopback) ?
-			    XX_FORCE_SIG_DECODE_FORCED : 0);
-	EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
-	falcon_write(efx, &reg, XX_CORE_STAT_REG);
-
-	falcon_read(efx, &reg, XX_SD_CTL_REG);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
-	falcon_write(efx, &reg, XX_SD_CTL_REG);
+			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
 }
 
 
-/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails
- * to come back up. Bash it until it comes back up */
-static void falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
 {
-	efx->mac_up = falcon_xaui_link_ok(efx);
+	bool mac_up = falcon_xaui_link_ok(efx);
 
-	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
 	    efx_phy_mode_disabled(efx->phy_mode))
 		/* XAUI link is expected to be down */
-		return;
+		return mac_up;
 
-	while (!efx->mac_up && tries) {
+	falcon_stop_nic_stats(efx);
+
+	while (!mac_up && tries) {
 		EFX_LOG(efx, "bashing xaui\n");
 		falcon_reset_xaui(efx);
 		udelay(200);
 
-		efx->mac_up = falcon_xaui_link_ok(efx);
+		mac_up = falcon_xaui_link_ok(efx);
 		--tries;
 	}
+
+	falcon_start_nic_stats(efx);
+
+	return mac_up;
 }
 
-static void falcon_reconfigure_xmac(struct efx_nic *efx)
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+	return !falcon_check_xaui_link_up(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
 {
 	falcon_mask_status_intr(efx, false);
 
@@ -275,18 +284,15 @@ static void falcon_reconfigure_xmac(struct efx_nic *efx)
 
 	falcon_reconfigure_mac_wrapper(efx);
 
-	falcon_check_xaui_link_up(efx, 5);
+	efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
 	falcon_mask_status_intr(efx, true);
+
+	return 0;
 }
 
 static void falcon_update_stats_xmac(struct efx_nic *efx)
 {
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
-	int rc;
-
-	rc = falcon_dma_stats(efx, XgDmaDone_offset);
-	if (rc)
-		return;
 
 	/* Update MAC stats from DMAed values */
 	FALCON_STAT(efx, XgRxOctets, rx_bytes);
@@ -344,35 +350,19 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
 		 mac_stats->rx_control * 64);
 }
 
-static void falcon_xmac_irq(struct efx_nic *efx)
-{
-	/* The XGMII link has a transient fault, which indicates either:
-	 *   - there's a transient xgmii fault
-	 *   - falcon's end of the xaui link may need a kick
-	 *   - the wire-side link may have gone down, but the lasi/poll()
-	 *     hasn't noticed yet.
-	 *
-	 * We only want to even bother polling XAUI if we're confident it's
-	 * not (1) or (3). In both cases, the only reliable way to spot this
-	 * is to wait a bit. We do this here by forcing the mac link state
-	 * to down, and waiting for the mac poll to come round and check
-	 */
-	efx->mac_up = false;
-}
-
-static void falcon_poll_xmac(struct efx_nic *efx)
+void falcon_poll_xmac(struct efx_nic *efx)
 {
-	if (!EFX_WORKAROUND_5147(efx) || !efx->link_up || efx->mac_up)
+	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
+	    !efx->xmac_poll_required)
 		return;
 
 	falcon_mask_status_intr(efx, false);
-	falcon_check_xaui_link_up(efx, 1);
+	efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
 	falcon_mask_status_intr(efx, true);
 }
 
 struct efx_mac_operations falcon_xmac_operations = {
 	.reconfigure	= falcon_reconfigure_xmac,
 	.update_stats	= falcon_update_stats_xmac,
-	.irq		= falcon_xmac_irq,
-	.poll		= falcon_poll_xmac,
+	.check_fault	= falcon_xmac_check_fault,
 };
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
deleted file mode 100644
index dfccaa7b573e..000000000000
--- a/drivers/net/sfc/gmii.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_GMII_H
-#define EFX_GMII_H
-
-/*
- * GMII interface
- */
-
-#include <linux/mii.h>
-
-/* GMII registers, excluding registers already defined as MII
- * registers in mii.h
- */
-#define GMII_IER		0x12	/* Interrupt enable register */
-#define GMII_ISR		0x13	/* Interrupt status register */
-
-/* Interrupt enable register */
-#define IER_ANEG_ERR		0x8000	/* Bit 15 - autonegotiation error */
-#define IER_SPEED_CHG		0x4000	/* Bit 14 - speed changed */
-#define IER_DUPLEX_CHG		0x2000	/* Bit 13 - duplex changed */
-#define IER_PAGE_RCVD		0x1000	/* Bit 12 - page received */
-#define IER_ANEG_DONE		0x0800	/* Bit 11 - autonegotiation complete */
-#define IER_LINK_CHG		0x0400	/* Bit 10 - link status changed */
-#define IER_SYM_ERR		0x0200	/* Bit 9 - symbol error */
-#define IER_FALSE_CARRIER	0x0100	/* Bit 8 - false carrier */
-#define IER_FIFO_ERR		0x0080	/* Bit 7 - FIFO over/underflow */
-#define IER_MDIX_CHG		0x0040	/* Bit 6 - MDI crossover changed */
-#define IER_DOWNSHIFT		0x0020	/* Bit 5 - downshift */
-#define IER_ENERGY		0x0010	/* Bit 4 - energy detect */
-#define IER_DTE_POWER		0x0004	/* Bit 2 - DTE power detect */
-#define IER_POLARITY_CHG	0x0002	/* Bit 1 - polarity changed */
-#define IER_JABBER		0x0001	/* Bit 0 - jabber */
-
-/* Interrupt status register */
-#define ISR_ANEG_ERR		0x8000	/* Bit 15 - autonegotiation error */
-#define ISR_SPEED_CHG		0x4000	/* Bit 14 - speed changed */
-#define ISR_DUPLEX_CHG		0x2000	/* Bit 13 - duplex changed */
-#define ISR_PAGE_RCVD		0x1000	/* Bit 12 - page received */
-#define ISR_ANEG_DONE		0x0800	/* Bit 11 - autonegotiation complete */
-#define ISR_LINK_CHG		0x0400	/* Bit 10 - link status changed */
-#define ISR_SYM_ERR		0x0200	/* Bit 9 - symbol error */
-#define ISR_FALSE_CARRIER	0x0100	/* Bit 8 - false carrier */
-#define ISR_FIFO_ERR		0x0080	/* Bit 7 - FIFO over/underflow */
-#define ISR_MDIX_CHG		0x0040	/* Bit 6 - MDI crossover changed */
-#define ISR_DOWNSHIFT		0x0020	/* Bit 5 - downshift */
-#define ISR_ENERGY		0x0010	/* Bit 4 - energy detect */
-#define ISR_DTE_POWER		0x0004	/* Bit 2 - DTE power detect */
-#define ISR_POLARITY_CHG	0x0002	/* Bit 1 - polarity changed */
-#define ISR_JABBER		0x0001	/* Bit 0 - jabber */
-
-#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
new file mode 100644
index 000000000000..b89177c27f4a
--- /dev/null
+++ b/drivers/net/sfc/io.h
@@ -0,0 +1,256 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy:
+ *
+ * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
+ * which necessitates locking.
+ * Under normal operation few writes to NIC registers are made and these
+ * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
+ * cased to allow 4-byte (hence lockless) accesses.
+ *
+ * It *is* safe to write to these 4-byte registers in the middle of an
+ * access to an 8-byte or 16-byte register.  We therefore use a
+ * spinlock to protect accesses to the larger registers, but no locks
+ * for the 4-byte registers.
+ *
+ * A write barrier is needed to ensure that DW3 is written after DW0/1/2
+ * due to the way the 16-byte registers are "collected" in the BIU.
+ *
+ * We also lock when carrying out reads, to ensure consistency of the
+ * data (made possible since the BIU reads all 128 bits into a cache).
+ * Reads are very rare, so this isn't a significant performance
+ * impact.  (Most data transferred from NIC to host is DMAed directly
+ * into host memory).
+ *
+ * I/O BAR access uses locks for both reads and writes (but is only provided
+ * for testing purposes).
+ */
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+				  unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+				  unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Writes to a normal 16-byte Efx register, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+			      unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		    EFX_OWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+	wmb();
+	_efx_writeq(efx, value->u64[1], reg + 8);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+	_efx_writed(efx, value->u32[2], reg + 8);
+	wmb();
+	_efx_writed(efx, value->u32[3], reg + 12);
+#endif
+	mmiowb();
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write an 8-byte NIC SRAM entry through the supplied mapping,
+ * locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+				   efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+		    addr, EFX_QWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	__raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+	__raw_writel((__force u32)value->u32[0], membase + addr);
+	wmb();
+	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+	mmiowb();
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write dword to NIC register that allows partial writes
+ *
+ * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
+ * TX_DESC_UPD_REG) can be written to as a single dword.  This allows
+ * for lockless writes.
+ */
+static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+			      unsigned int reg)
+{
+	EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
+		    reg, EFX_DWORD_VAL(*value));
+
+	/* No lock required */
+	_efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read from a NIC register
+ *
+ * This reads an entire 16-byte register in one go, locking as
+ * appropriate.  It is essential to read the first dword first, as this
+ * prompts the NIC to load the current value into the shadow register.
+ */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+			     unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
+	value->u32[1] = _efx_readd(efx, reg + 4);
+	value->u32[2] = _efx_readd(efx, reg + 8);
+	value->u32[3] = _efx_readd(efx, reg + 12);
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+	EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+		    EFX_OWORD_VAL(*value));
+}
+
+/* Read an 8-byte SRAM entry through the supplied mapping,
+ * locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+				  efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
+	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+	EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+		    addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read dword from register that allows partial writes (sic) */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+				unsigned int reg)
+{
+	value->u32[0] = _efx_readd(efx, reg);
+	EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
+		    reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write to a register forming part of a table */
+static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
+				      unsigned int reg, unsigned int index)
+{
+	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read to a register forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+				     unsigned int reg, unsigned int index)
+{
+	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Write to a dword register forming part of a table */
+static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
+				       unsigned int reg, unsigned int index)
+{
+	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Page-mapped register block size */
+#define EFX_PAGE_BLOCK_SIZE 0x2000
+
+/* Calculate offset to page-mapped register block */
+#define EFX_PAGED_REG(page, reg) \
+	((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+
+/* As for efx_writeo(), but for a page-mapped register. */
+static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+				   unsigned int reg, unsigned int page)
+{
+	efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
+}
+
+/* As for efx_writed(), but for a page-mapped register. */
+static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+				   unsigned int reg, unsigned int page)
+{
+	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+}
+
+/* Write dword to page-mapped register with an extra lock.
+ *
+ * As for efx_writed_page(), but for a register that suffers from
+ * SFC bug 3181. If writing to page 0, take out a lock so the BIU
+ * collector cannot be confused. */
+static inline void efx_writed_page_locked(struct efx_nic *efx,
+					  efx_dword_t *value,
+					  unsigned int reg,
+					  unsigned int page)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	if (page == 0) {
+		spin_lock_irqsave(&efx->biu_lock, flags);
+		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+		spin_unlock_irqrestore(&efx->biu_lock, flags);
+	} else {
+		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+	}
+}
+
+#endif /* EFX_IO_H */
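To connect the locking notes above with the call sites elsewhere in this patch: full-width registers are updated with a locked read-modify-write through efx_reado()/efx_writeo(), while the few dword-writable registers are posted with a single lockless efx_writed()/efx_writed_page(). A minimal sketch under those assumptions, reusing regs.h names already visible in the falcon_xmac.c hunks (FR_AB_XX_CORE_STAT, FRF_AB_XX_XGXS_LB_EN) and the bitfield.h helpers; FR_EXAMPLE_DOORBELL is a placeholder, not a real register:

/* Sketch only: mirrors the read-modify-write pattern used in falcon_xmac.c;
 * assumes net_driver.h, io.h, regs.h and bitfield.h are in scope. */
static void example_set_xgxs_loopback(struct efx_nic *efx, bool enable)
{
	efx_oword_t reg;

	/* 16-byte register: locked read, field update, locked write-back */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, enable);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
}

/* Sketch only: FR_EXAMPLE_DOORBELL stands in for one of the dword-writable
 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG, TX_DESC_UPD_REG). */
static void example_ring_doorbell(struct efx_nic *efx, unsigned int value)
{
	efx_dword_t reg;

	/* Dword register: one lockless posted write, no read-modify-write */
	EFX_POPULATE_DWORD_1(reg, EFX_DWORD_0, value);
	efx_writed(efx, &reg, FR_EXAMPLE_DOORBELL);
}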
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index 4e7074278fe1..f1aa5f374890 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -15,5 +15,9 @@
 
 extern struct efx_mac_operations falcon_gmac_operations;
 extern struct efx_mac_operations falcon_xmac_operations;
+extern struct efx_mac_operations efx_mcdi_mac_operations;
+extern void falcon_reconfigure_xmac_core(struct efx_nic *efx);
+extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+			      u32 dma_len, int enable, int clear);
 
 #endif
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
new file mode 100644
index 000000000000..683353b904c7
--- /dev/null
+++ b/drivers/net/sfc/mcdi.c
@@ -0,0 +1,1112 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2008-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "regs.h"
+#include "mcdi_pcol.h"
+#include "phy.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+/* Software-defined structure to the shared-memory */
+#define CMD_NOTIFY_PORT0 0
+#define CMD_NOTIFY_PORT1 4
+#define CMD_PDU_PORT0    0x008
+#define CMD_PDU_PORT1    0x108
+#define REBOOT_FLAG_PORT0 0x3f8
+#define REBOOT_FLAG_PORT1 0x3fc
+
+#define MCDI_RPC_TIMEOUT       10 /*seconds */
+
+#define MCDI_PDU(efx)							\
+	(efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
+#define MCDI_DOORBELL(efx)						\
+	(efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
+#define MCDI_REBOOT_FLAG(efx)						\
+	(efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
+
+#define SEQ_MASK							\
+	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+	struct siena_nic_data *nic_data;
+	EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+	nic_data = efx->nic_data;
+	return &nic_data->mcdi;
+}
+
+void efx_mcdi_init(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+
+	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+		return;
+
+	mcdi = efx_mcdi(efx);
+	init_waitqueue_head(&mcdi->wq);
+	spin_lock_init(&mcdi->iface_lock);
+	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+	mcdi->mode = MCDI_MODE_POLL;
+
+	(void) efx_mcdi_poll_reboot(efx);
+}
+
+static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
+			    const u8 *inbuf, size_t inlen)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+	unsigned int i;
+	efx_dword_t hdr;
+	u32 xflags, seqno;
+
+	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
+	BUG_ON(inlen & 3 || inlen >= 0x100);
+
+	seqno = mcdi->seqno & SEQ_MASK;
+	xflags = 0;
+	if (mcdi->mode == MCDI_MODE_EVENTS)
+		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+	EFX_POPULATE_DWORD_6(hdr,
+			     MCDI_HEADER_RESPONSE, 0,
+			     MCDI_HEADER_RESYNC, 1,
+			     MCDI_HEADER_CODE, cmd,
+			     MCDI_HEADER_DATALEN, inlen,
+			     MCDI_HEADER_SEQ, seqno,
+			     MCDI_HEADER_XFLAGS, xflags);
+
+	efx_writed(efx, &hdr, pdu);
+
+	for (i = 0; i < inlen; i += 4)
+		_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+	/* Ensure the payload is written out before the header */
+	wmb();
+
+	/* ring the doorbell with a distinctive value */
+	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+	int i;
+
+	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
+	BUG_ON(outlen & 3 || outlen >= 0x100);
+
+	for (i = 0; i < outlen; i += 4)
+		*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	unsigned int time, finish;
+	unsigned int respseq, respcmd, error;
+	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+	unsigned int rc, spins;
+	efx_dword_t reg;
+
+	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+	rc = efx_mcdi_poll_reboot(efx);
+	if (rc)
+		goto out;
+
+	/* Poll for completion. Poll quickly (once per microsecond) for the
+	 * first jiffy, because MCDI responses are generally fast. After
+	 * that, back off and poll approximately once per jiffy.
+	 */
+	spins = TICK_USEC;
+	finish = get_seconds() + MCDI_RPC_TIMEOUT;
+
+	while (1) {
+		if (spins != 0) {
+			--spins;
+			udelay(1);
+		} else
+			schedule();
+
+		time = get_seconds();
+
+		rmb();
+		efx_readd(efx, &reg, pdu);
+
+		/* All 1's indicates that shared memory is in reset (and is
+		 * not a valid header). Wait for it to come out of reset
+		 * before completing the command. */
+		if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
+		    EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+			break;
+
+		if (time >= finish)
+			return -ETIMEDOUT;
+	}
+
+	mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
+	respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
+	respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
+	error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
+
+	if (error && mcdi->resplen == 0) {
+		EFX_ERR(efx, "MC rebooted\n");
+		rc = EIO;
+	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+		EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+			respseq, mcdi->seqno);
+		rc = EIO;
+	} else if (error) {
+		efx_readd(efx, &reg, pdu + 4);
+		switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
+#define TRANSLATE_ERROR(name)					\
+		case MC_CMD_ERR_ ## name:			\
+			rc = name;				\
+			break
+			TRANSLATE_ERROR(ENOENT);
+			TRANSLATE_ERROR(EINTR);
+			TRANSLATE_ERROR(EACCES);
+			TRANSLATE_ERROR(EBUSY);
+			TRANSLATE_ERROR(EINVAL);
+			TRANSLATE_ERROR(EDEADLK);
+			TRANSLATE_ERROR(ENOSYS);
+			TRANSLATE_ERROR(ETIME);
+#undef TRANSLATE_ERROR
+		default:
+			rc = EIO;
+			break;
+		}
+	} else
+		rc = 0;
+
+out:
+	mcdi->resprc = rc;
+	if (rc)
+		mcdi->resplen = 0;
+
+	/* Return rc=0 like wait_event_timeout() */
+	return 0;
+}
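+
+/* Editorial sketch, not part of the original patch: a stand-alone,
+ * user-space illustration of the token-pasting TRANSLATE_ERROR pattern
+ * used in efx_mcdi_poll() above.  The MC_CMD_ERR_* values are copied from
+ * mcdi_pcol.h (added later in this patch); the helper and its output are
+ * hypothetical.
+ */
+#if 0	/* illustration only, never built into the driver */
+#include <errno.h>
+#include <stdio.h>
+
+#define MC_CMD_ERR_ENOENT 2
+#define MC_CMD_ERR_EINTR  4
+#define MC_CMD_ERR_EBUSY  16
+
+static int mc_err_to_errno(unsigned int mc_err)
+{
+	switch (mc_err) {
+#define TRANSLATE_ERROR(name)	case MC_CMD_ERR_ ## name: return name
+	TRANSLATE_ERROR(ENOENT);
+	TRANSLATE_ERROR(EINTR);
+	TRANSLATE_ERROR(EBUSY);
+#undef TRANSLATE_ERROR
+	default:
+		return EIO;	/* anything unrecognised becomes an I/O error */
+	}
+}
+
+int main(void)
+{
+	/* MC_CMD_ERR_EBUSY (16) maps onto the host's EBUSY */
+	printf("mc error 16 -> errno %d\n", mc_err_to_errno(16));
+	return 0;
+}
+#endif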
+
+/* Test and clear MC-rebooted flag for this port/function */
+int efx_mcdi_poll_reboot(struct efx_nic *efx)
+{
+	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+	efx_dword_t reg;
+	uint32_t value;
+
+	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+		return false;
+
+	efx_readd(efx, &reg, addr);
+	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+	if (value == 0)
+		return 0;
+
+	EFX_ZERO_DWORD(reg);
+	efx_writed(efx, &reg, addr);
+
+	if (value == MC_STATUS_DWORD_ASSERT)
+		return -EINTR;
+	else
+		return -EIO;
+}
+
+static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+{
+	/* Wait until the interface becomes QUIESCENT and we win the race
+	 * to mark it RUNNING. */
+	wait_event(mcdi->wq,
+		   atomic_cmpxchg(&mcdi->state,
+				  MCDI_STATE_QUIESCENT,
+				  MCDI_STATE_RUNNING)
+		   == MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	if (wait_event_timeout(
+		    mcdi->wq,
+		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
+		    msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
+		return -ETIMEDOUT;
+
+	/* Check if efx_mcdi_mode_poll() switched us back to polled
+	 * completions, in which case poll for completion directly. If
+	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
+	 * end up completing the request again, which is safe.
+	 *
+	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
+	 * wait_event_timeout() implicitly provides.
+	 */
+	if (mcdi->mode == MCDI_MODE_POLL)
+		return efx_mcdi_poll(efx);
+
+	return 0;
+}
+
+static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+{
+	/* If the interface is RUNNING, then move to COMPLETED and wake any
+	 * waiters. If the interface isn't in RUNNING then we've received a
+	 * duplicate completion after we've already transitioned back to
+	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
+	 * have failed the seqno check].
+	 */
+	if (atomic_cmpxchg(&mcdi->state,
+			   MCDI_STATE_RUNNING,
+			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+		wake_up(&mcdi->wq);
+		return true;
+	}
+
+	return false;
+}
+
+static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
+{
+	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+	wake_up(&mcdi->wq);
+}
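+
+/* Editorial sketch, not part of the original patch: the QUIESCENT ->
+ * RUNNING -> COMPLETED hand-off implemented by efx_mcdi_acquire(),
+ * efx_mcdi_complete() and efx_mcdi_release() above, restated with C11
+ * atomics.  The real code sleeps on mcdi->wq rather than spinning, and
+ * all names below are hypothetical.
+ */
+#if 0	/* illustration only, never built into the driver */
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+enum { QUIESCENT, RUNNING, COMPLETED };
+static _Atomic int state = QUIESCENT;
+
+static void acquire(void)
+{
+	int expected;
+
+	do {	/* only one caller wins the QUIESCENT -> RUNNING transition */
+		expected = QUIESCENT;
+	} while (!atomic_compare_exchange_strong(&state, &expected, RUNNING));
+}
+
+static bool complete(void)
+{
+	int expected = RUNNING;
+
+	/* a duplicate completion finds the state no longer RUNNING */
+	return atomic_compare_exchange_strong(&state, &expected, COMPLETED);
+}
+
+static void release(void)
+{
+	atomic_store(&state, QUIESCENT);
+}
+
+int main(void)
+{
+	acquire();
+	printf("first completion: %d\n", complete());		/* prints 1 */
+	printf("duplicate completion: %d\n", complete());	/* prints 0 */
+	release();
+	return 0;
+}
+#endif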
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+			    unsigned int datalen, unsigned int errno)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	bool wake = false;
+
+	spin_lock(&mcdi->iface_lock);
+
+	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
+		if (mcdi->credits)
+			/* The request has been cancelled */
+			--mcdi->credits;
+		else
+			EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
+				"seq 0x%x\n", seqno, mcdi->seqno);
+	} else {
+		mcdi->resprc = errno;
+		mcdi->resplen = datalen;
+
+		wake = true;
+	}
+
+	spin_unlock(&mcdi->iface_lock);
+
+	if (wake)
+		efx_mcdi_complete(mcdi);
+}
+
+/* Issue the given command by writing the data into the shared memory PDU,
+ * ringing the doorbell and waiting for completion. Copy out the result. */
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+		 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+		 size_t *outlen_actual)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	int rc;
+	BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+
+	efx_mcdi_acquire(mcdi);
+
+	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+	spin_lock_bh(&mcdi->iface_lock);
+	++mcdi->seqno;
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+
+	if (mcdi->mode == MCDI_MODE_POLL)
+		rc = efx_mcdi_poll(efx);
+	else
+		rc = efx_mcdi_await_completion(efx);
+
+	if (rc != 0) {
+		/* Close the race with efx_mcdi_ev_cpl() executing just too late
+		 * and completing a request we've just cancelled, by ensuring
+		 * that the seqno check therein fails.
+		 */
+		spin_lock_bh(&mcdi->iface_lock);
+		++mcdi->seqno;
+		++mcdi->credits;
+		spin_unlock_bh(&mcdi->iface_lock);
+
+		EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
+			cmd, (int)inlen, mcdi->mode);
+	} else {
+		size_t resplen;
+
+		/* At the very least we need a memory barrier here to ensure
+		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+		 * a spurious efx_mcdi_ev_cpl() running concurrently by
+		 * acquiring the iface_lock. */
+		spin_lock_bh(&mcdi->iface_lock);
+		rc = -mcdi->resprc;
+		resplen = mcdi->resplen;
+		spin_unlock_bh(&mcdi->iface_lock);
+
+		if (rc == 0) {
+			efx_mcdi_copyout(efx, outbuf,
+					 min(outlen, resplen + 3) & ~0x3);
+			if (outlen_actual != NULL)
+				*outlen_actual = resplen;
+		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
+			; /* Don't reset if MC_CMD_REBOOT returns EIO */
+		else if (rc == -EIO || rc == -EINTR) {
+			EFX_ERR(efx, "MC fatal error %d\n", -rc);
+			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+		} else
+			EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
+				cmd, (int)inlen, -rc);
+	}
+
+	efx_mcdi_release(mcdi);
+	return rc;
+}
+
+void efx_mcdi_mode_poll(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+
+	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+		return;
+
+	mcdi = efx_mcdi(efx);
+	if (mcdi->mode == MCDI_MODE_POLL)
+		return;
+
+	/* We can switch from event completion to polled completion, because
+	 * mcdi requests are always completed in shared memory. We do this by
+	 * switching the mode to POLL and then completing the request.
+	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
+	 *
+	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
+	 * which efx_mcdi_complete() provides for us.
+	 */
+	mcdi->mode = MCDI_MODE_POLL;
+
+	efx_mcdi_complete(mcdi);
+}
+
+void efx_mcdi_mode_event(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+
+	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+		return;
+
+	mcdi = efx_mcdi(efx);
+
+	if (mcdi->mode == MCDI_MODE_EVENTS)
+		return;
+
+	/* We can't switch from polled to event completion in the middle of a
+	 * request, because the completion method is specified in the request.
+	 * So acquire the interface to serialise the requestors. We don't need
+	 * to acquire the iface_lock to change the mode here, but we do need a
+	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
+	 * efx_mcdi_acquire() provides.
+	 */
+	efx_mcdi_acquire(mcdi);
+	mcdi->mode = MCDI_MODE_EVENTS;
+	efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	/* If there is an outstanding MCDI request, it has been terminated
+	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
+	 * in polled mode, then do nothing because the MC reboot handler will
+	 * set the header correctly. However, if the mcdi interface is waiting
+	 * for a CMDDONE event it won't receive it [and since all MCDI events
+	 * are sent to the same queue, we can't be racing with
+	 * efx_mcdi_ev_cpl()]
+	 *
+	 * There's a race here with efx_mcdi_rpc(), because we might receive
+	 * a REBOOT event *before* the request has been copied out. In polled
+	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
+	 * is ignored. In event mode, this condition is just an edge case of
+	 * receiving a REBOOT event after posting the MCDI request. Did the MC
+	 * reboot before or after the copyout? The best we can do is always
+	 * just return failure.
+	 */
+	spin_lock(&mcdi->iface_lock);
+	if (efx_mcdi_complete(mcdi)) {
+		if (mcdi->mode == MCDI_MODE_EVENTS) {
+			mcdi->resprc = rc;
+			mcdi->resplen = 0;
+		}
+	} else
+		/* Nobody was waiting for an MCDI request, so trigger a reset */
+		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+
+	spin_unlock(&mcdi->iface_lock);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+	[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+};
+
+
+static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+	u32 flags, fcntl, speed, lpa;
+
+	speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+	EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+	speed = efx_mcdi_event_link_speed[speed];
+
+	flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+	fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+	lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+	/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+	 * which is only run after flushing the event queues. Therefore, it
+	 * is safe to modify the link state outside of the mac_lock here.
+	 */
+	efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+	efx_mcdi_phy_check_fcntl(efx, lpa);
+
+	efx_link_status_changed(efx);
+}
+
+static const char *sensor_names[] = {
+	[MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
+	[MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
+	[MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
+	[MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
+	[MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
+	[MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
+	[MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
+	[MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
+	[MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
+	[MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
+	[MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
+	[MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
+	[MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
+};
+
+static const char *sensor_status_names[] = {
+	[MC_CMD_SENSOR_STATE_OK] = "OK",
+	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+};
+
+static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+	unsigned int monitor, state, value;
+	const char *name, *state_txt;
+	monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+	value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+	/* Deal gracefully with the board having more sensors than we
+	 * know about, but do not expect new sensor states. */
+	name = (monitor >= ARRAY_SIZE(sensor_names))
+				    ? "No sensor name available" :
+				    sensor_names[monitor];
+	EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+	state_txt = sensor_status_names[state];
+
+	EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+		monitor, name, state_txt, value);
+}
+
+/* Called from falcon_process_eventq for MCDI events */
+void efx_mcdi_process_event(struct efx_channel *channel,
+			    efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+	switch (code) {
+	case MCDI_EVENT_CODE_BADSSERT:
+		EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
+		efx_mcdi_ev_death(efx, EINTR);
+		break;
+
+	case MCDI_EVENT_CODE_PMNOTICE:
+		EFX_INFO(efx, "MCDI PM event.\n");
+		break;
+
+	case MCDI_EVENT_CODE_CMDDONE:
+		efx_mcdi_ev_cpl(efx,
+				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+		break;
+
+	case MCDI_EVENT_CODE_LINKCHANGE:
+		efx_mcdi_process_link_change(efx, event);
+		break;
+	case MCDI_EVENT_CODE_SENSOREVT:
+		efx_mcdi_sensor_event(efx, event);
+		break;
+	case MCDI_EVENT_CODE_SCHEDERR:
+		EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
+		break;
+	case MCDI_EVENT_CODE_REBOOT:
+		EFX_INFO(efx, "MC Reboot\n");
+		efx_mcdi_ev_death(efx, EIO);
+		break;
+	case MCDI_EVENT_CODE_MAC_STATS_DMA:
+		/* MAC stats are gathered lazily.  We can ignore this. */
+		break;
+
+	default:
+		EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
+	}
+}
+
+/**************************************************************************
+ *
+ * Specific request functions
+ *
+ **************************************************************************
+ */
+
+int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
+{
+	u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
+	size_t outlength;
+	const __le16 *ver_words;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlength);
+	if (rc)
+		goto fail;
+
+	if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+		*version = 0;
+		*build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
+		return 0;
+	}
+
+	if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
+		rc = -EMSGSIZE;
+		goto fail;
+	}
+
+	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
+	*version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
+		    ((u64)le16_to_cpu(ver_words[1]) << 32) |
+		    ((u64)le16_to_cpu(ver_words[2]) << 16) |
+		    le16_to_cpu(ver_words[3]));
+	*build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
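+
+/* Editorial sketch, not part of the original patch: how the four
+ * little-endian 16-bit words at GET_VERSION_OUT_VERSION are folded into
+ * the u64 reported by efx_mcdi_fwver() above.  The sample version number
+ * is made up.
+ */
+#if 0	/* illustration only, never built into the driver */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	/* e.g. firmware 3.0.5.123, as host-order words after le16_to_cpu() */
+	uint16_t w[4] = { 3, 0, 5, 123 };
+	uint64_t version = ((uint64_t)w[0] << 48) | ((uint64_t)w[1] << 32) |
+			   ((uint64_t)w[2] << 16) | w[3];
+
+	printf("%d.%d.%d.%d -> 0x%016llx\n", w[0], w[1], w[2], w[3],
+	       (unsigned long long)version);
+	return 0;
+}
+#endif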
+
+int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+			bool *was_attached)
+{
+	u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
+	u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
+		       driver_operating ? 1 : 0);
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+		goto fail;
+
+	if (was_attached != NULL)
+		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+			   u16 *fw_subtype_list)
+{
+	uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+	size_t outlen;
+	int port_num = efx_port_num(efx);
+	int offset;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
+		rc = -EMSGSIZE;
+		goto fail;
+	}
+
+	offset = (port_num)
+		? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
+		: MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
+	if (mac_address)
+		memcpy(mac_address, outbuf + offset, ETH_ALEN);
+	if (fw_subtype_list)
+		memcpy(fw_subtype_list,
+		       outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
+		       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);
+
+	return rc;
+}
+
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
+{
+	u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+	u32 dest = 0;
+	int rc;
+
+	if (uart)
+		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
+	if (evq)
+		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+
+	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
+	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
+
+	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
+{
+	u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
+		goto fail;
+
+	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n",
+		__func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+			size_t *size_out, size_t *erase_size_out,
+			bool *protected_out)
+{
+	u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
+	u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
+		goto fail;
+
+	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
+				(1 << MC_CMD_NVRAM_PROTECTED_LBN));
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+	u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+			loff_t offset, u8 *buffer, size_t length)
+{
+	u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
+	u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+			   loff_t offset, const u8 *buffer, size_t length)
+{
+	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+			 loff_t offset, size_t length)
+{
+	u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+	u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+	union {
+		u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
+		u8 reboot[MC_CMD_REBOOT_IN_LEN];
+	} inbuf;
+	u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
+	unsigned int flags, index, ofst;
+	const char *reason;
+	size_t outlen;
+	int retry;
+	int rc;
+
+	/* Check if the MC is in the assertion handler, retrying twice: once
+	 * because a boot-time assertion might cause this command to fail
+	 * with EINTR, and once again because GET_ASSERTS can race with
+	 * MC_CMD_REBOOT running on the other port. */
+	retry = 2;
+	do {
+		MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
+		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
+				  inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
+				  assertion, sizeof(assertion), &outlen);
+	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
+
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
+		return -EINVAL;
+
+	flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+		return 0;
+
+	/* Reset the hardware atomically such that only one port will succeed.
+	 * This command will succeed if a reboot is no longer required (because
+	 * the other port did it first), but fail with EIO if it succeeds.
+	 */
+	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
+		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+	efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
+		     NULL, 0, NULL);
+
+	/* Print out the assertion */
+	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+		? "system-level assertion"
+		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+		? "thread-level assertion"
+		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+		? "watchdog reset"
+		: "unknown assertion";
+	EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+		MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+		MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
+
+	/* Print out the registers */
+	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+	for (index = 1; index < 32; index++) {
+		EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
+			MCDI_DWORD2(assertion, ofst));
+		ofst += sizeof(efx_dword_t);
+	}
+
+	return 0;
+}
+
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+	u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+	int rc;
+
+	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
+	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
+	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
+
+	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+}
+
+int efx_mcdi_reset_port(struct efx_nic *efx)
+{
+	int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
+	if (rc)
+		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_reset_mc(struct efx_nic *efx)
+{
+	u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	/* White is black, and up is down */
+	if (rc == -EIO)
+		return 0;
+	if (rc == 0)
+		rc = -EIO;
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+			    const u8 *mac, int *id_out)
+{
+	u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
+	u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
+	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+		       MC_CMD_FILTER_MODE_SIMPLE);
+	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+		rc = -EMSGSIZE;
+		goto fail;
+	}
+
+	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
+
+	return 0;
+
+fail:
+	*id_out = -1;
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+
+}
+
+
+int
+efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
+{
+	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
+}
+
+
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
+{
+	u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
+		rc = -EMSGSIZE;
+		goto fail;
+	}
+
+	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
+
+	return 0;
+
+fail:
+	*id_out = -1;
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
+{
+	u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
+{
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
new file mode 100644
index 000000000000..de916728c2e3
--- /dev/null
+++ b/drivers/net/sfc/mcdi.h
@@ -0,0 +1,130 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2008-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+/**
+ * enum efx_mcdi_state
+ * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
+ *	mcdi_lock then they are able to move to MCDI_STATE_RUNNING
+ * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
+ *	moved into this state is allowed to move out of it.
+ * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
+ *	has not yet consumed the result. For all other threads, equivalent to
+ *	MCDI_STATE_RUNNING.
+ */
+enum efx_mcdi_state {
+	MCDI_STATE_QUIESCENT,
+	MCDI_STATE_RUNNING,
+	MCDI_STATE_COMPLETED,
+};
+
+enum efx_mcdi_mode {
+	MCDI_MODE_POLL,
+	MCDI_MODE_EVENTS,
+};
+
+/**
+ * struct efx_mcdi_iface
+ * @state: Interface state. Waited for by mcdi_wq.
+ * @wq: Wait queue for threads waiting for state != STATE_RUNNING
+ * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event.
+ *	Serialised by @iface_lock
+ * @seqno: The next sequence number to use for mcdi requests.
+ *	Serialised by @iface_lock
+ * @credits: Number of spurious MCDI completion events allowed before we
+ *	trigger a fatal error. Protected by @iface_lock
+ * @resprc: Returned MCDI completion
+ * @resplen: Returned payload length
+ */
+struct efx_mcdi_iface {
+	atomic_t state;
+	wait_queue_head_t wq;
+	spinlock_t iface_lock;
+	enum efx_mcdi_mode mode;
+	unsigned int credits;
+	unsigned int seqno;
+	unsigned int resprc;
+	size_t resplen;
+};
+
+extern void efx_mcdi_init(struct efx_nic *efx);
+
+extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
+			size_t inlen, u8 *outbuf, size_t outlen,
+			size_t *outlen_actual);
+
+extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
+extern void efx_mcdi_mode_poll(struct efx_nic *efx);
+extern void efx_mcdi_mode_event(struct efx_nic *efx);
+
+extern void efx_mcdi_process_event(struct efx_channel *channel,
+				   efx_qword_t *event);
+
+#define MCDI_PTR2(_buf, _ofst)						\
+	(((u8 *)_buf) + _ofst)
+#define MCDI_SET_DWORD2(_buf, _ofst, _value)				\
+	EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)),	\
+			     EFX_DWORD_0, _value)
+#define MCDI_DWORD2(_buf, _ofst)					\
+	EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)),	\
+			EFX_DWORD_0)
+#define MCDI_QWORD2(_buf, _ofst)					\
+	EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)),	\
+			  EFX_QWORD_0)
+
+#define MCDI_PTR(_buf, _ofst)						\
+	MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
+#define MCDI_SET_DWORD(_buf, _ofst, _value)				\
+	MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
+#define MCDI_DWORD(_buf, _ofst)						\
+	MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+#define MCDI_QWORD(_buf, _ofst)						\
+	MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_EVENT_FIELD(_ev, _field)			\
+	EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+
+extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build);
+extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+			       bool *was_attached_out);
+extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+				  u16 *fw_subtype_list);
+extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
+			     u32 dest_evq);
+extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+			       size_t *size_out, size_t *erase_size_out,
+			       bool *protected_out);
+extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
+				       unsigned int type);
+extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+			       loff_t offset, u8 *buffer, size_t length);
+extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+				loff_t offset, const u8 *buffer,
+				size_t length);
+extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+				loff_t offset, size_t length);
+extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
+					unsigned int type);
+extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
+extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+extern int efx_mcdi_reset_port(struct efx_nic *efx);
+extern int efx_mcdi_reset_mc(struct efx_nic *efx);
+extern int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+				   const u8 *mac, int *id_out);
+extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
+					 const u8 *mac, int *id_out);
+extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+
+#endif /* EFX_MCDI_H */
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
new file mode 100644
index 000000000000..06d24a1e412a
--- /dev/null
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -0,0 +1,152 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "mac.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+static int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+	u32 reject, fcntl;
+	u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
+
+	memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
+	       efx->net_dev->dev_addr, ETH_ALEN);
+
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+			EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+	/* The MCDI command provides for controlling accept/reject
+	 * of broadcast packets too, but the driver doesn't currently
+	 * expose this. */
+	reject = (efx->promiscuous) ? 0 :
+		(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
+
+	switch (efx->wanted_fc) {
+	case EFX_FC_RX | EFX_FC_TX:
+		fcntl = MC_CMD_FCNTL_BIDIR;
+		break;
+	case EFX_FC_RX:
+		fcntl = MC_CMD_FCNTL_RESPOND;
+		break;
+	default:
+		fcntl = MC_CMD_FCNTL_OFF;
+		break;
+	}
+	if (efx->wanted_fc & EFX_FC_AUTO)
+		fcntl = MC_CMD_FCNTL_AUTO;
+
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+			    NULL, 0, NULL);
+}
+
+static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
+{
+	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	size_t outlength;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlength);
+	if (rc)
+		goto fail;
+
+	*faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n",
+		__func__, rc);
+	return rc;
+}
+
+int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+		       u32 dma_len, int enable, int clear)
+{
+	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
+	int rc;
+	efx_dword_t *cmd_ptr;
+	int period = 1000;
+	u32 addr_hi;
+	u32 addr_lo;
+
+	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);
+
+	addr_lo = ((u64)dma_addr) >> 0;
+	addr_hi = ((u64)dma_addr) >> 32;
+
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
+	cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
+	if (enable)
+		EFX_POPULATE_DWORD_6(*cmd_ptr,
+				     MC_CMD_MAC_STATS_CMD_DMA, 1,
+				     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+				     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+				     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
+				     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+				     MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
+	else
+		EFX_POPULATE_DWORD_5(*cmd_ptr,
+				     MC_CMD_MAC_STATS_CMD_DMA, 0,
+				     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+				     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+				     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
+				     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: %s failed rc=%d\n",
+		__func__, enable ? "enable" : "disable", rc);
+	return rc;
+}
+
+static int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
+{
+	int rc;
+
+	rc = efx_mcdi_set_mac(efx);
+	if (rc != 0)
+		return rc;
+
+	/* Restore the multicast hash registers. */
+	efx->type->push_multicast_hash(efx);
+
+	return 0;
+}
+
+
+static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+	u32 faults;
+	int rc = efx_mcdi_get_mac_faults(efx, &faults);
+	return (rc != 0) || (faults != 0);
+}
+
+
+struct efx_mac_operations efx_mcdi_mac_operations = {
+	.reconfigure	= efx_mcdi_mac_reconfigure,
+	.update_stats	= efx_port_dummy_op_void,
+	.check_fault 	= efx_mcdi_mac_check_fault,
+};
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
new file mode 100644
index 000000000000..2a85360a46f0
--- /dev/null
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -0,0 +1,1578 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 1
+
+/**
+ * MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32-bit
+ * structure, filled in by the client.
+ *
+ *       0       7  8     16    20     22  23  24    31
+ *      | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ *               |                      |   |
+ *               |                      |   \--- Response
+ *               |                      \------- Error
+ *               \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request, a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 2
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX 0xfc
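+
+/* Editorial sketch, not part of the original patch: packing a request
+ * header by hand from the LBN/WIDTH values above (the driver does this
+ * with EFX_POPULATE_DWORD_6() in efx_mcdi_copyin()).  The field values
+ * are examples only.
+ */
+#if 0	/* illustration only, never built into the driver */
+#include <stdint.h>
+#include <stdio.h>
+
+#define INSERT_FIELD(val, lbn, width) \
+	(((uint32_t)(val) & ((1u << (width)) - 1)) << (lbn))
+
+int main(void)
+{
+	/* e.g. MC_CMD_GET_VERSION (0x08), no payload, seq 5, event completion */
+	uint32_t hdr = INSERT_FIELD(0x08, 0, 7)		/* CODE */
+		     | INSERT_FIELD(1, 7, 1)		/* RESYNC, always set */
+		     | INSERT_FIELD(0, 8, 8)		/* DATALEN */
+		     | INSERT_FIELD(5, 16, 4)		/* SEQ */
+		     | INSERT_FIELD(0x01, 24, 8);	/* XFLAGS_EVREQ */
+
+	printf("header dword = 0x%08x\n", hdr);		/* 0x01050088 */
+	return 0;
+}
+#endif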
+
+/* The MC can generate events for two reasons:
+ *   - To complete a shared memory request if XFLAGS_EVREQ was set
+ *   - As a notification (link state, i2c event), controlled
+ *     via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ *  0      32     33      36    44     52     60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ *           |
+ *           \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ *   - LEVEL==INFO    Command succeeded
+ *   - LEVEL==ERR     Command failed
+ *
+ *    0     8         16      24     32
+ *   | Seq | Datalen | Errno | Rsvd |
+ *
+ *   These fields are taken directly out of the standard MCDI header, i.e.,
+ *   LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header.  An event can be distinguished from an MCDI response by
+ * examining the first byte which is 0xc0.  This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ *      0         7        8
+ *     | command | Resync |     = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ *      56     60  63
+ *     | Rsvd | Code |    = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+#define MCDI_EVENT_LEVEL_INFO (0)
+#define MCDI_EVENT_LEVEL_WARN (1)
+#define MCDI_EVENT_LEVEL_ERR (2)
+#define MCDI_EVENT_LEVEL_FATAL (3)
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+#define MCDI_EVENT_CODE_BADSSERT (1)
+#define MCDI_EVENT_CODE_PMNOTICE (2)
+#define MCDI_EVENT_CODE_CMDDONE (3)
+#define  MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define  MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define  MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define  MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define  MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define  MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_CODE_LINKCHANGE (4)
+#define  MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define  MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define  MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define  MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+#define  MCDI_EVENT_LINKCHANGE_SPEED_100M 1
+#define  MCDI_EVENT_LINKCHANGE_SPEED_1G 2
+#define  MCDI_EVENT_LINKCHANGE_SPEED_10G 3
+#define  MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define  MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define  MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define  MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_CODE_SENSOREVT (5)
+#define  MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define  MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define  MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define  MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define  MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define  MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_CODE_SCHEDERR (6)
+#define MCDI_EVENT_CODE_REBOOT (7)
+#define MCDI_EVENT_CODE_MAC_STATS_DMA (8)
+#define  MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define  MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
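+
+/* Editorial sketch, not part of the original patch: decoding a synthetic
+ * CMDDONE event with the LBN/WIDTH values above (the driver uses
+ * EFX_QWORD_FIELD()/MCDI_EVENT_FIELD() for this).  The sample event is
+ * made up.
+ */
+#if 0	/* illustration only, never built into the driver */
+#include <stdint.h>
+#include <stdio.h>
+
+#define EXTRACT_FIELD(ev, lbn, width) \
+	((unsigned int)(((ev) >> (lbn)) & ((1ull << (width)) - 1)))
+
+int main(void)
+{
+	/* CODE=CMDDONE(3) at bit 44, LEVEL=INFO(0) at bit 33, and a DATA
+	 * field carrying seq=5, datalen=32, errno=0 */
+	uint64_t ev = ((uint64_t)3 << 44) | (32u << 8) | 5u;
+
+	printf("code=%u level=%u seq=%u datalen=%u errno=%u\n",
+	       EXTRACT_FIELD(ev, 44, 8), EXTRACT_FIELD(ev, 33, 3),
+	       EXTRACT_FIELD(ev, 0, 8), EXTRACT_FIELD(ev, 8, 8),
+	       EXTRACT_FIELD(ev, 16, 8));
+	return 0;
+}
+#endif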
+
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+
+/* MC_CMD_READ32: (debug, variadic out)
+ * Read multiple 32byte words from MC memory
+ */
+#define MC_CMD_READ32 0x01
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_OUT_LEN(_numwords) \
+	(4 * (_numwords))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+
+/* MC_CMD_WRITE32: (debug, variadic in)
+ * Write multiple 32-bit words to MC memory
+ */
+#define MC_CMD_WRITE32 0x02
+#define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4)
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+/* MC_CMD_COPYCODE: (debug)
+ * Copy MC code between two locations and jump
+ */
+#define MC_CMD_COPYCODE 0x03
+#define MC_CMD_COPYCODE_IN_LEN 16
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 1
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+/* MC_CMD_SET_FUNC: (debug)
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x04
+#define MC_CMD_SET_FUNC_IN_LEN 4
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS:
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x05
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+/* Reboot caused by watchdog */
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN   (0)
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1)
+/* MC booted from primary flash partition */
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN    (1)
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH  (1)
+/* MC booted from backup flash partition */
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN     (2)
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH   (1)
+
+/* MC_CMD_GET_ASSERTS: (debug, variadic out)
+ * Get (and optionally clear) the current assertion status.
+ *
+ * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion
+ * payload. The other fields will only be present if
+ * OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x06
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/*! No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1
+/*! A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2
+/*! A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3
+/*! The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+
+/* MC_CMD_LOG_CTRL:
+ * Determine the output stream for various events and messages
+ */
+#define MC_CMD_LOG_CTRL 0x07
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1)
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2)
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+/* MC_CMD_GET_VERSION:
+ * Get version information about the MC firmware
+ */
+#define MC_CMD_GET_VERSION 0x08
+#define MC_CMD_GET_VERSION_IN_LEN 0
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_V1_OUT_LEN 32
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* The version response of a boot ROM awaiting rescue */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000
+#define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS {		\
+	(1 << MC_CMD_READ32)	|			\
+	(1 << MC_CMD_WRITE32)	|			\
+	(1 << MC_CMD_COPYCODE)	|			\
+	(1 << MC_CMD_GET_VERSION),			\
+	0, 0, 0 }
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+/* Points to the recovery mode entry point. */
+#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+
+/* Test execution limits */
+#define MC_TESTEXEC_VARIANT_COUNT 16
+#define MC_TESTEXEC_RESULT_COUNT 7
+
+/* MC_CMD_SET_TESTVARS: (debug, variadic in)
+ * Write variant words for test.
+ *
+ * The user supplies a bitmap of the variants they wish to set.
+ * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP)
+ */
+#define MC_CMD_SET_TESTVARS 0x09
+#define MC_CMD_SET_TESTVARS_IN_LEN(_numwords)	\
+  (4 + 4*(_numwords))
+#define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0
+/* Up to MC_TESTEXEC_VARIANT_COUNT of 32-bit words start here */
+#define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4
+#define MC_CMD_SET_TESTVARS_OUT_LEN 0
+
+/* MC_CMD_GET_TESTRCS: (debug, variadic out)
+ * Return result words from test.
+ */
+#define MC_CMD_GET_TESTRCS 0x0a
+#define MC_CMD_GET_TESTRCS_IN_LEN 4
+#define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0
+#define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \
+	(4 * (_numwords))
+#define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0
+
+/* MC_CMD_RUN_TEST: (debug)
+ * Run the test exported by this firmware image
+ */
+#define MC_CMD_RUN_TEST 0x0b
+#define MC_CMD_RUN_TEST_IN_LEN 0
+#define MC_CMD_RUN_TEST_OUT_LEN 0
+
+/* MC_CMD_CSR_READ32: (debug, variadic out)
+ * Read 32bit words from the indirect memory map
+ */
+#define MC_CMD_CSR_READ32 0x0c
+#define MC_CMD_CSR_READ32_IN_LEN 12
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_OUT_LEN(_numwords)	\
+	(((_numwords) * 4) + 4)
+/* IN.NUMWORDS of 32bit words start here */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords)	\
+	((_numwords) * 4)
+
+/* MC_CMD_CSR_WRITE32: (debug, variadic in)
+ * Write 32bit dwords to the indirect memory map
+ */
+#define MC_CMD_CSR_WRITE32 0x0d
+#define MC_CMD_CSR_WRITE32_IN_LEN(_numwords)	\
+	(((_numwords) * 4) + 8)
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+/* Multiple 32bit words of data to write start here */
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+/* MC_CMD_JTAG_WORK: (debug, fpga only)
+ * Process JTAG work buffer for RBF acceleration.
+ *
+ *  Host: bit count, (up to) 32 words of data to clock out to JTAG
+ *   (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.)
+ *  MC: bit count, (up to) 32 words of data clocked in from JTAG
+ *   (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused)
+ */
+#define MC_CMD_JTAG_WORK 0x0e
+
+/* MC_CMD_STACKINFO: (debug, variadic out)
+ * Get stack information
+ *
+ * Host: nothing
+ * MC: (thread ptr, stack size, free space) for each thread in system
+ */
+#define MC_CMD_STACKINFO 0x0f
+
+/* MC_CMD_MDIO_READ:
+ * MDIO register read
+ */
+#define MC_CMD_MDIO_READ 0x10
+#define MC_CMD_MDIO_READ_IN_LEN 16
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+
+/* MC_CMD_MDIO_WRITE:
+ * MDIO register write
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+
+/* By default all the MCDI MDIO operations are performed in clause45 mode.
+ * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 32
+
+/* There are two MDIO buses: one for the internal PHY, and one for external
+ * devices.
+ */
+#define MC_CMD_MDIO_BUS_INTERNAL 0
+#define MC_CMD_MDIO_BUS_EXTERNAL 1
+
+/* The MDIO commands return the raw status bits from the MDIO block.  A "good"
+ * transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_STATUS_GOOD 0x08
+
+
+/* MC_CMD_DBI_WRITE: (debug)
+ * Write DBI register(s)
+ *
+ * Host: address, byte-enables (and VF selection, and cs2 flag),
+ *       value [,address ...]
+ * MC: nothing
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#define MC_CMD_DBI_WRITE_IN_LEN(_numwords)		\
+	(12 * (_numwords))
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word)		\
+	(((_word) * 12) + 0)
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word)	\
+	(((_word) * 12) + 4)
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word)		\
+	(((_word) * 12) + 8)
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBI_READ: (debug)
+ * Read DBI register(s)
+ *
+ * Host: address, [,address ...]
+ * MC: value [,value ...]
+ * (note: this does not support reading from VFs, but is retained for backwards
+ * compatibility; see MC_CMD_DBI_READX below)
+ */
+#define MC_CMD_DBI_READ 0x13
+#define MC_CMD_DBI_READ_IN_LEN(_numwords)		\
+	(4 * (_numwords))
+#define MC_CMD_DBI_READ_OUT_LEN(_numwords)		\
+	(4 * (_numwords))
+
+/* MC_CMD_PORT_READ32: (debug)
+ * Read a 32-bit register from the indirect port register map.
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+#define MC_CMD_PORT_READ32_IN_LEN 4
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+/* MC_CMD_PORT_WRITE32: (debug)
+ * Write a 32-bit register to the indirect port register map.
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+/* MC_CMD_PORT_READ128: (debug)
+ * Read a 128-bit register from indirect port register map
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+#define MC_CMD_PORT_READ128_IN_LEN 4
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+/* MC_CMD_PORT_WRITE128: (debug)
+ * Write a 128-bit register to indirect port register map.
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_GET_BOARD_CFG:
+ * Returns the MC firmware configuration structure
+ *
+ * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of
+ * NVRAM area.  The values are defined in the firmware/mc/platform/<xxx>.c file
+ * for a specific board type, but otherwise have no meaning to the MC; they
+ * are used by the driver to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24
+
+/* MC_CMD_DBI_READX: (debug)
+ * Read DBI register(s) -- extended functionality
+ *
+ * Host: vf selection, address, [,vf selection ...]
+ * MC: value [,value ...]
+ */
+#define MC_CMD_DBI_READX 0x19
+#define MC_CMD_DBI_READX_IN_LEN(_numwords)	\
+  (8*(_numwords))
+#define MC_CMD_DBI_READX_OUT_LEN(_numwords)	\
+  (4*(_numwords))
+
+/* MC_CMD_SET_RAND_SEED:
+ * Set the 16-byte seed for the MC pseudo-random generator
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+/* MC_CMD_LTSSM_HIST: (debug)
+ * Retrieve the history of the LTSSM, if the build supports it.
+ *
+ * Host: nothing
+ * MC: variable number of LTSSM values, as bytes
+ * The history is read-to-clear.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_DRV_ATTACH:
+ * Inform MCPU that this port is managed on the host (i.e. driver active)
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_NCSI_PROD: (debug)
+ * Trigger an NC-SI event (and possibly an AEN in response)
+ */
+#define MC_CMD_NCSI_PROD 0x1d
+#define MC_CMD_NCSI_PROD_IN_LEN 4
+#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
+#define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0
+#define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1
+#define MC_CMD_NCSI_PROD_RESET_LBN 1
+#define MC_CMD_NCSI_PROD_RESET_WIDTH 1
+#define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2
+#define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1
+#define MC_CMD_NCSI_PROD_OUT_LEN 0
+
+/* Enumeration */
+#define MC_CMD_NCSI_PROD_LINKCHANGE 0
+#define MC_CMD_NCSI_PROD_RESET 1
+#define MC_CMD_NCSI_PROD_DRVATTACH 2
+
+/* MC_CMD_DEVEL: (debug)
+ * Reserved for development
+ */
+#define MC_CMD_DEVEL 0x1e
+
+/* MC_CMD_SHMUART: (debug)
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_LEN 4
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+/* MC_CMD_PORT_RESET:
+ * Generic per-port reset. There is no equivalent for per-board reset.
+ *
+ * Locks required: None
+ * Return code: 0, ETIME
+ */
+#define MC_CMD_PORT_RESET 0x20
+#define MC_CMD_PORT_RESET_IN_LEN 0
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+/* MC_CMD_RESOURCE_LOCK:
+ * Generic resource lock/unlock interface.
+ *
+ * Locks required: None
+ * Return code: 0,
+ *              EBUSY (if trylock is contended by other port),
+ *              EDEADLK (if trylock is already acquired by this port),
+ *              EINVAL (if unlock doesn't own the lock)
+ */
+#define MC_CMD_RESOURCE_LOCK 0x21
+#define MC_CMD_RESOURCE_LOCK_IN_LEN 8
+#define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0
+#define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1
+#define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0
+#define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4
+#define MC_CMD_RESOURCE_LOCK_I2C 2
+#define MC_CMD_RESOURCE_LOCK_PHY 3
+#define MC_CMD_RESOURCE_LOCK_OUT_LEN 0
+
+/* MC_CMD_SPI_COMMAND: (variadic in, variadic out)
+ * Read/Write to/from the SPI device.
+ *
+ * Locks required: SPI_LOCK
+ * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held)
+ */
+#define MC_CMD_SPI_COMMAND 0x22
+#define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes)	(12 + (_write_bytes))
+#define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0
+#define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0
+#define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4
+#define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8
+/* Data to write here */
+#define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes)
+/* Data read here */
+#define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0
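+
+/* Usage sketch (illustrative only, local names invented for the example):
+ * an SPI transaction that writes 8 bytes and reads back 4 would size its
+ * buffers with the variadic macros above:
+ *
+ *	u8 inbuf[MC_CMD_SPI_COMMAND_IN_LEN(8)];		(12-byte args + 8)
+ *	u8 outbuf[MC_CMD_SPI_COMMAND_OUT_LEN(4)];	(4 bytes read back)
+ *
+ * with the write payload placed at IN_WRITE_BUFFER_OFST and READ_BYTES
+ * in the arguments block set to 4.
+ */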
+
+/* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out)
+ * Read/Write to/from the I2C bus.
+ *
+ * Locks required: I2C_LOCK
+ * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held)
+ */
+#define MC_CMD_I2C_RW 0x23
+#define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes))
+#define MC_CMD_I2C_RW_IN_ARGS_OFST 0
+#define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0
+#define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4
+/* Data to write here */
+#define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8
+#define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes)
+/* Data read here */
+#define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0
+
+/* Generic phy capability bitmask */
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
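+
+/* Usage sketch (illustrative only): a capability word advertising
+ * autonegotiated 10G full duplex with symmetric pause could be built as
+ *
+ *	cap = (1 << MC_CMD_PHY_CAP_10000FDX_LBN) |
+ *	      (1 << MC_CMD_PHY_CAP_PAUSE_LBN) |
+ *	      (1 << MC_CMD_PHY_CAP_AN_LBN);
+ *
+ * This is the form taken by SET_LINK.CAP and GET_LINK's CAP/LP_CAP words.
+ */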
+
+/* Generic loopback enumeration */
+#define MC_CMD_LOOPBACK_NONE 0
+#define MC_CMD_LOOPBACK_DATA 1
+#define MC_CMD_LOOPBACK_GMAC 2
+#define MC_CMD_LOOPBACK_XGMII 3
+#define MC_CMD_LOOPBACK_XGXS 4
+#define MC_CMD_LOOPBACK_XAUI 5
+#define MC_CMD_LOOPBACK_GMII 6
+#define MC_CMD_LOOPBACK_SGMII 7
+#define MC_CMD_LOOPBACK_XGBR 8
+#define MC_CMD_LOOPBACK_XFI 9
+#define MC_CMD_LOOPBACK_XAUI_FAR 10
+#define MC_CMD_LOOPBACK_GMII_FAR 11
+#define MC_CMD_LOOPBACK_SGMII_FAR 12
+#define MC_CMD_LOOPBACK_XFI_FAR 13
+#define MC_CMD_LOOPBACK_GPHY 14
+#define MC_CMD_LOOPBACK_PHYXS 15
+#define MC_CMD_LOOPBACK_PCS 16
+#define MC_CMD_LOOPBACK_PMAPMD 17
+#define MC_CMD_LOOPBACK_XPORT 18
+#define MC_CMD_LOOPBACK_XGMII_WS 19
+#define MC_CMD_LOOPBACK_XAUI_WS 20
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 21
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22
+#define MC_CMD_LOOPBACK_GMII_WS 23
+#define MC_CMD_LOOPBACK_XFI_WS 24
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 25
+#define MC_CMD_LOOPBACK_PHYXS_WS 26
+
+/* Generic PHY statistics enumeration */
+#define MC_CMD_OUI 0
+#define MC_CMD_PMA_PMD_LINK_UP 1
+#define MC_CMD_PMA_PMD_RX_FAULT 2
+#define MC_CMD_PMA_PMD_TX_FAULT 3
+#define MC_CMD_PMA_PMD_SIGNAL 4
+#define MC_CMD_PMA_PMD_SNR_A 5
+#define MC_CMD_PMA_PMD_SNR_B 6
+#define MC_CMD_PMA_PMD_SNR_C 7
+#define MC_CMD_PMA_PMD_SNR_D 8
+#define MC_CMD_PCS_LINK_UP 9
+#define MC_CMD_PCS_RX_FAULT 10
+#define MC_CMD_PCS_TX_FAULT 11
+#define MC_CMD_PCS_BER 12
+#define MC_CMD_PCS_BLOCK_ERRORS 13
+#define MC_CMD_PHYXS_LINK_UP 14
+#define MC_CMD_PHYXS_RX_FAULT 15
+#define MC_CMD_PHYXS_TX_FAULT 16
+#define MC_CMD_PHYXS_ALIGN 17
+#define MC_CMD_PHYXS_SYNC 18
+#define MC_CMD_AN_LINK_UP 19
+#define MC_CMD_AN_COMPLETE 20
+#define MC_CMD_AN_10GBT_STATUS 21
+#define MC_CMD_CL22_LINK_UP 22
+#define MC_CMD_PHY_NSTATS 23
+
+/* MC_CMD_GET_PHY_CFG:
+ * Report PHY configuration.  This guarantees to succeed even if the PHY is in
+ * a "zombie" state.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1
+#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2
+#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* PHY statistics bitmap */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* PHY type/name string */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_MEDIA_XAUI 1
+#define MC_CMD_MEDIA_CX4 2
+#define MC_CMD_MEDIA_KX4 3
+#define MC_CMD_MEDIA_XFP 4
+#define MC_CMD_MEDIA_SFP_PLUS 5
+#define MC_CMD_MEDIA_BASE_T 6
+/* MDIO "MMDS" supported */
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22  0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 1
+#define MC_CMD_MMD_CLAUSE45_WIS 2
+#define MC_CMD_MMD_CLAUSE45_PCS 3
+#define MC_CMD_MMD_CLAUSE45_PHYXS 4
+#define MC_CMD_MMD_CLAUSE45_DTEXS 5
+#define MC_CMD_MMD_CLAUSE45_TC 6
+#define MC_CMD_MMD_CLAUSE45_AN 7
+/* Clause22 proxied over clause45 by PHY */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 29
+#define MC_CMD_MMD_CLAUSE45_VEND1 30
+#define MC_CMD_MMD_CLAUSE45_VEND2 31
+/* PHY stepping version */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+/* MC_CMD_START_PHY_BIST:
+ * Start a BIST test on the PHY.
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#define MC_CMD_START_BIST_IN_LEN 4
+#define MC_CMD_START_BIST_TYPE_OFST 0
+
+/* Run the PHY's short BIST */
+#define MC_CMD_PHY_BIST_SHORT  1
+/* Run the PHY's long BIST */
+#define MC_CMD_PHY_BIST_LONG   2
+/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
+#define MC_CMD_BPX_SERDES_BIST 3
+
+/* MC_CMD_POLL_PHY_BIST: (variadic output)
+ * Poll for BIST completion
+ *
+ * Returns a single status code, and a binary blob of phy-specific
+ * bist output. If the driver can't successfully parse the BIST output,
+ * it should still respect the Pass/Fail in OUT.RESULT.
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_POLL_BIST 0x26
+#define MC_CMD_POLL_BIST_IN_LEN 0
+#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_RUNNING 1
+#define MC_CMD_POLL_BIST_PASSED 2
+#define MC_CMD_POLL_BIST_FAILED 3
+#define MC_CMD_POLL_BIST_TIMEOUT 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
+/* MC_CMD_PHY_SPI: (variadic in, variadic out)
+ * Read/Write/Erase the PHY SPI device
+ *
+ * Locks required: PHY_LOCK
+ * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_PHY_SPI 0x27
+#define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes))
+#define MC_CMD_PHY_SPI_IN_ARGS_OFST 0
+#define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0
+#define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4
+#define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8
+/* Data to write here */
+#define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12
+#define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes)
+/* Data read here */
+#define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0
+
+
+/* MC_CMD_GET_LOOPBACK_MODES:
+ * Returns a bitmask of loopback modes available at each speed.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24
+
+/* Flow control enumeration */
+#define MC_CMD_FCNTL_OFF 0
+#define MC_CMD_FCNTL_RESPOND 1
+#define MC_CMD_FCNTL_BIDIR 2
+/* Auto - Use what the link has autonegotiated
+ *      - The driver should modify the advertised capabilities via SET_LINK.CAP
+ *        to control the negotiated flow control mode.
+ *      - Can only be set if the PHY supports PAUSE+ASYM capabilities
+ *      - Never returned by GET_LINK as the value programmed into the MAC
+ */
+#define MC_CMD_FCNTL_AUTO 3
+
+/* Generic mac fault bitmask */
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+
+/* MC_CMD_GET_LINK:
+ * Read the unified MAC/PHY link state
+ *
+ * Locks required: None
+ * Return code: 0, ETIME
+ */
+#define MC_CMD_GET_LINK 0x29
+#define MC_CMD_GET_LINK_IN_LEN 0
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side and link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down
+ * even if this reads non-zero */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+/* Whether we have overall link up */
+#define MC_CMD_GET_LINK_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1
+/* Whether we have link at the layers provided by the BPX */
+#define MC_CMD_GET_LINK_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1
+/* Whether the PHY has external link */
+#define MC_CMD_GET_LINK_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
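+
+/* Usage sketch (illustrative only): overall link state and duplex can be
+ * tested from the FLAGS word, e.g.
+ *
+ *	up = flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN);
+ *	fd = flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN);
+ */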
+
+/* MC_CMD_SET_LINK:
+ * Write the unified MAC/PHY link configuration
+ *
+ * A loopback speed of "0" is supported, and means
+ * (choose any available speed)
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#define MC_CMD_SET_LINK_IN_LEN 16
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+/* MC_CMD_SET_ID_LED:
+ * Set identification LED state
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define  MC_CMD_LED_OFF 0
+#define  MC_CMD_LED_ON 1
+#define  MC_CMD_LED_DEFAULT 2
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+/* MC_CMD_SET_MAC:
+ * Set MAC configuration
+ *
+ * The MTU is the MTU programmed directly into the XMAC/GMAC
+ * (inclusive of EtherII, VLAN, bug16011 padding)
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_PHY_STATS:
+ * Get generic PHY statistics
+ *
+ * This call returns the statistics for a generic PHY, by direct DMA
+ * into host memory, in a sparse array (indexed by the enumeration above).
+ * Each value is represented by a 32-bit number.
+ *
+ * Locks required: None
+ * Returns: 0, ETIME
+ * Response methods: shared memory, event
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#define MC_CMD_PHY_STATS_IN_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_PHY_STATS_OUT_LEN 0
+
+/* Unified MAC statistics enumeration */
+#define MC_CMD_MAC_GENERATION_START 0
+#define MC_CMD_MAC_TX_PKTS 1
+#define MC_CMD_MAC_TX_PAUSE_PKTS 2
+#define MC_CMD_MAC_TX_CONTROL_PKTS 3
+#define MC_CMD_MAC_TX_UNICAST_PKTS 4
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 5
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 6
+#define MC_CMD_MAC_TX_BYTES 7
+#define MC_CMD_MAC_TX_BAD_BYTES 8
+#define MC_CMD_MAC_TX_LT64_PKTS 9
+#define MC_CMD_MAC_TX_64_PKTS 10
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 11
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 12
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 13
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 14
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 17
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 18
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 23
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27
+#define MC_CMD_MAC_RX_PKTS 28
+#define MC_CMD_MAC_RX_PAUSE_PKTS 29
+#define MC_CMD_MAC_RX_GOOD_PKTS 30
+#define MC_CMD_MAC_RX_CONTROL_PKTS 31
+#define MC_CMD_MAC_RX_UNICAST_PKTS 32
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 33
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 34
+#define MC_CMD_MAC_RX_BYTES 35
+#define MC_CMD_MAC_RX_BAD_BYTES 36
+#define MC_CMD_MAC_RX_64_PKTS 37
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 38
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 39
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 40
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 41
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 44
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 46
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 47
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52
+#define MC_CMD_MAC_RX_JABBER_PKTS 53
+#define MC_CMD_MAC_RX_NODESC_DROPS 54
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
+#define MC_CMD_MAC_RX_MATCH_FAULT 59
+/* Insert new members here. */
+#define MC_CMD_MAC_GENERATION_END 60
+#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
+
+/* MC_CMD_MAC_STATS:
+ * Get unified GMAC/XMAC statistics
+ *
+ * This call returns unified statistics maintained by the MC as it
+ * switches between the GMAC and XMAC. The MC will write out all
+ * supported stats.  The driver should zero initialise the buffer to
+ * guarantee consistent results.
+ *
+ * Locks required: None
+ * Returns: 0
+ * Response methods: shared memory, event
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_CMD_DMA_LBN 0
+#define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
+/* Fields only relevant when PERIODIC_CHANGE is set */
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+
+#define MC_CMD_MAC_STATS_OUT_LEN 0
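+
+/* Usage sketch (illustrative only; the exact combination of flags used by
+ * a real driver is not mandated here): a periodic DMA of the MAC stats
+ * every 1000ms might compose the command word as
+ *
+ *	cmd = (1 << MC_CMD_MAC_STATS_CMD_DMA_LBN) |
+ *	      (1 << MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN) |
+ *	      (1 << MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN) |
+ *	      (1000 << MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN);
+ *
+ * with IN_DMA_LEN large enough to hold MC_CMD_MAC_NSTATS entries.
+ */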
+
+/* Callisto flags */
+#define MC_CMD_SFT9001_ROBUST_LBN 0
+#define MC_CMD_SFT9001_ROBUST_WIDTH 1
+#define MC_CMD_SFT9001_SHORT_REACH_LBN 1
+#define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1
+
+/* MC_CMD_SFT9001_GET:
+ * Read the current Callisto-specific setting
+ *
+ * Locks required: None
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_SFT9001_GET 0x30
+#define MC_CMD_SFT9001_GET_IN_LEN 0
+#define MC_CMD_SFT9001_GET_OUT_LEN 4
+#define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0
+
+/* MC_CMD_SFT9001_SET:
+ * Write the current Callisto-specific setting
+ *
+ * Locks required: None
+ * Returns: 0, ETIME, EINVAL
+ */
+#define MC_CMD_SFT9001_SET 0x31
+#define MC_CMD_SFT9001_SET_IN_LEN 4
+#define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0
+#define MC_CMD_SFT9001_SET_OUT_LEN 0
+
+
+/* MC_CMD_WOL_FILTER_SET:
+ * Set a WoL filter
+ *
+ * Locks required: None
+ * Returns: 0, EBUSY, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+
+/* There is a union at offset 8, so the following defines overlap with
+ * one another */
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST		\
+	MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST   \
+	MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST   \
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10)
+
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST   \
+	MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST   \
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34)
+
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST	\
+	MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST		\
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48)
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST	\
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176)
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST	\
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177)
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST	\
+	(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
+
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
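+
+/* Usage sketch (illustrative only): an IPV4_SYN filter fills the union at
+ * offset 8 as follows; the rest of the union is unused for this type:
+ *
+ *	SRC_IP   at offset  8 (MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST)
+ *	DST_IP   at offset 12
+ *	SRC_PORT at offset 16
+ *	DST_PORT at offset 18
+ */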
+
+/* WOL Filter types enumeration */
+#define MC_CMD_WOL_TYPE_MAGIC      0x0
+			 /* unused 0x1 */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC  0x2
+#define MC_CMD_WOL_TYPE_IPV4_SYN   0x3
+#define MC_CMD_WOL_TYPE_IPV6_SYN   0x4
+#define MC_CMD_WOL_TYPE_BITMAP     0x5
+#define MC_CMD_WOL_TYPE_MAX        0x6
+
+#define MC_CMD_FILTER_MODE_SIMPLE     0x0
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
+
+/* MC_CMD_WOL_FILTER_REMOVE:
+ * Remove a WoL filter
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/* MC_CMD_WOL_FILTER_RESET:
+ * Reset (i.e. remove all) WoL filters
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 0
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+/* MC_CMD_SET_MCAST_HASH:
+ * Set the multicast hash value without otherwise
+ * reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_TYPES:
+ * Return bitfield indicating available types of virtual NVRAM partitions
+ *
+ * Locks required: none
+ * Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+
+/* Supported NVRAM types */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0
+#define MC_CMD_NVRAM_TYPE_MC_FW 1
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 7
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 10
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 11
+#define MC_CMD_NVRAM_TYPE_LOG 12
+
+/* MC_CMD_NVRAM_INFO:
+ * Read info about a virtual NVRAM partition
+ *
+ * Locks required: none
+ * Returns: 0, EINVAL (bad type)
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define   MC_CMD_NVRAM_PROTECTED_LBN 0
+#define   MC_CMD_NVRAM_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+/* MC_CMD_NVRAM_UPDATE_START:
+ * Start a group of update operations on a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+/* MC_CMD_NVRAM_READ:
+ * Read data from a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes)
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+
+/* MC_CMD_NVRAM_WRITE:
+ * Write data to a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes)
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
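+
+/* Usage sketch (illustrative only): writing 64 bytes at offset 0x100 of
+ * the MC_FW partition would use a request of
+ *
+ *	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(64)];	(12-byte header + 64)
+ *
+ * with TYPE = MC_CMD_NVRAM_TYPE_MC_FW, OFFSET = 0x100, LENGTH = 64 and
+ * the payload at IN_WRITE_BUFFER_OFST.
+ */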
+
+/* MC_CMD_NVRAM_ERASE:
+ * Erase sector(s) from a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH:
+ * Finish a group of update operations on a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_REBOOT:
+ * Reboot the MC. The AFTER_ASSERTION flag is intended to be used
+ * when the driver notices an assertion failure, to allow two ports to
+ * both recover (semi-)gracefully.
+ *
+ * Locks required: NONE
+ * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+/* MC_CMD_SCHEDINFO:
+ * Request scheduler info from the MC.
+ *
+ * Locks required: NONE
+ * Returns: An array of (timeslice, maximum overrun), one for each thread,
+ * in ascending order of thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+
+/* MC_CMD_SET_REBOOT_MODE: (debug)
+ * Set the mode for the next MC reboot.
+ *
+ * Locks required: NONE
+ *
+ * Sets the reboot mode to the specified value.  Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define   MC_CMD_REBOOT_MODE_NORMAL 0
+#define   MC_CMD_REBOOT_MODE_SNAPPER 3
+
+/* MC_CMD_DEBUG_LOG:
+ * Null request/response command (debug)
+ * - sequence number is always zero
+ * - only supported on the UART interface
+ * (the same set of bytes is delivered as an
+ * event over PCI)
+ */
+#define MC_CMD_DEBUG_LOG 0x40
+#define MC_CMD_DEBUG_LOG_IN_LEN 0
+#define MC_CMD_DEBUG_LOG_OUT_LEN 0
+
+/* Generic sensor enumeration. Note that a dual port NIC
+ * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and
+ * PHY1_TEMP depending on whether there is a single sensor
+ * in the vicinity of the two ports, or one per port.
+ */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0		/* degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 1		/* degC */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 2	/* bool */
+#define MC_CMD_SENSOR_PHY0_TEMP 3		/* degC */
+#define MC_CMD_SENSOR_PHY0_COOLING 4		/* bool */
+#define MC_CMD_SENSOR_PHY1_TEMP 5		/* degC */
+#define MC_CMD_SENSOR_PHY1_COOLING 6		/* bool */
+#define MC_CMD_SENSOR_IN_1V0 7			/* mV */
+#define MC_CMD_SENSOR_IN_1V2 8			/* mV */
+#define MC_CMD_SENSOR_IN_1V8 9			/* mV */
+#define MC_CMD_SENSOR_IN_2V5 10			/* mV */
+#define MC_CMD_SENSOR_IN_3V3 11			/* mV */
+#define MC_CMD_SENSOR_IN_12V0 12		/* mV */
+
+
+/* Sensor state */
+#define MC_CMD_SENSOR_STATE_OK 0
+#define MC_CMD_SENSOR_STATE_WARNING 1
+#define MC_CMD_SENSOR_STATE_FATAL 2
+#define MC_CMD_SENSOR_STATE_BROKEN 3
+
+/* MC_CMD_SENSOR_INFO:
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state.
+ * The mapping between value and sensor is nominally determined by the
+ * MC, but in practice is implemented as zero (BROKEN), one (TEMPERATURE),
+ * or two (VOLTAGE) ranges per sensor per state.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported
+ * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte
+ * offsets to the per-sensor arrays. Each sensor array has four 16bit
+ * numbers, min1, max1, min2, max2.
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+	(4 + (_x))
+#define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \
+	((_ofst) + 0)
+#define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \
+	((_ofst) + 2)
+#define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \
+	((_ofst) + 4)
+#define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \
+	((_ofst) + 6)
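+
+/* Usage sketch (illustrative only): to find the limits for PHY0_TEMP a
+ * caller would check bit MC_CMD_SENSOR_PHY0_TEMP in the mask, read the
+ * per-sensor byte offset from
+ *
+ *	MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(MC_CMD_SENSOR_PHY0_TEMP)
+ *
+ * and then apply the MIN1/MAX1/MIN2/MAX2 macros to that offset to pick
+ * out the four 16-bit limits.
+ */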
+
+/* MC_CMD_READ_SENSORS
+ * Returns the current (value, state) for each sensor
+ *
+ * Returns the current (value, state) [each 16bit] of each sensor supported by
+ * this board, by DMA'ing a sparse array (indexed by the sensor type) into host
+ * memory.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The board
+ * will function normally if all sensors are in STATE_OK or STATE_WARNING.
+ * Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
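+
+/* Sizing sketch (illustrative only; the required DMA length is an
+ * assumption here, not stated by this header): with one 16-bit value and
+ * one 16-bit state per sensor, a host buffer covering every sensor type
+ * defined above needs roughly 4 bytes per MC_CMD_SENSOR_* index.
+ */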
+
+
+/* MC_CMD_GET_PHY_STATE:
+ * Report current state of PHY.  A "zombie" PHY is a PHY that has failed to
+ * boot (e.g. due to missing or corrupted firmware).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_STATE_OFST 0
+/* PHY state enumeration: */
+#define MC_CMD_PHY_STATE_OK 1
+#define MC_CMD_PHY_STATE_ZOMBIE 2
+
+
+/* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority. */
+#define MC_CMD_SETUP_8021QBB 0x44
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0
+
+
+/* MC_CMD_WOL_FILTER_GET:
+ * Retrieve ID of any WoL filters
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD:
+ * Offload a protocol to NIC for lights-out state
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+
+/* There is a union at offset 4, following defines overlap due to
+ * this */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26
+
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD:
+ * Remove a lights-out protocol offload from the NIC
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* Lights-out offload protocols enumeration */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS  0x2
+
+
+/* MC_CMD_MAC_RESET_RESTORE:
+ * Restore MAC after block reset
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
new file mode 100644
index 000000000000..0e1bcc5a0d52
--- /dev/null
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -0,0 +1,597 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include "efx.h"
+#include "phy.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "mdio_10g.h"
+
+struct efx_mcdi_phy_cfg {
+	u32 flags;
+	u32 type;
+	u32 supported_cap;
+	u32 channel;
+	u32 port;
+	u32 stats_mask;
+	u8 name[20];
+	u32 media;
+	u32 mmd_mask;
+	u8 revision[20];
+	u32 forced_cap;
+};
+
+static int
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
+{
+	u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+	BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+		rc = -EMSGSIZE;
+		goto fail;
+	}
+
+	cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+	cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+	cfg->supported_cap =
+		MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+	cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+	cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+	cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+	memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+	       sizeof(cfg->name));
+	cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+	cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+	memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+	       sizeof(cfg->revision));
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+			     u32 flags, u32 loopback_mode,
+			     u32 loopback_speed)
+{
+	u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+	u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+		rc = -EMSGSIZE;
+		goto fail;
+	}
+
+	*loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED);
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
+			 unsigned int prtad, unsigned int devad, u16 addr,
+			 u16 *value_out, u32 *status_out)
+{
+	u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
+	u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	*value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+	*status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
+			  unsigned int prtad, unsigned int devad, u16 addr,
+			  u16 value, u32 *status_out)
+{
+	u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
+	u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	*status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+	return 0;
+
+fail:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+{
+	u32 result = 0;
+
+	switch (media) {
+	case MC_CMD_MEDIA_KX4:
+		result |= SUPPORTED_Backplane;
+		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+			result |= SUPPORTED_1000baseKX_Full;
+		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+			result |= SUPPORTED_10000baseKX4_Full;
+		break;
+
+	case MC_CMD_MEDIA_XFP:
+	case MC_CMD_MEDIA_SFP_PLUS:
+		result |= SUPPORTED_FIBRE;
+		break;
+
+	case MC_CMD_MEDIA_BASE_T:
+		result |= SUPPORTED_TP;
+		if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+			result |= SUPPORTED_10baseT_Half;
+		if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+			result |= SUPPORTED_10baseT_Full;
+		if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+			result |= SUPPORTED_100baseT_Half;
+		if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+			result |= SUPPORTED_100baseT_Full;
+		if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+			result |= SUPPORTED_1000baseT_Half;
+		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+			result |= SUPPORTED_1000baseT_Full;
+		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+			result |= SUPPORTED_10000baseT_Full;
+		break;
+	}
+
+	if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+		result |= SUPPORTED_Pause;
+	if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+		result |= SUPPORTED_Asym_Pause;
+	if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		result |= SUPPORTED_Autoneg;
+
+	return result;
+}
+
+static u32 ethtool_to_mcdi_cap(u32 cap)
+{
+	u32 result = 0;
+
+	if (cap & SUPPORTED_10baseT_Half)
+		result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+	if (cap & SUPPORTED_10baseT_Full)
+		result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+	if (cap & SUPPORTED_100baseT_Half)
+		result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+	if (cap & SUPPORTED_100baseT_Full)
+		result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+	if (cap & SUPPORTED_1000baseT_Half)
+		result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+	if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+		result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+	if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+		result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+	if (cap & SUPPORTED_Pause)
+		result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+	if (cap & SUPPORTED_Asym_Pause)
+		result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+	if (cap & SUPPORTED_Autoneg)
+		result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+	return result;
+}
+
+static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	enum efx_phy_mode mode, supported;
+	u32 flags;
+
+	/* TODO: Advertise the capabilities supported by this PHY */
+	supported = 0;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN))
+		supported |= PHY_MODE_TX_DISABLED;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN))
+		supported |= PHY_MODE_LOW_POWER;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN))
+		supported |= PHY_MODE_OFF;
+
+	mode = efx->phy_mode & supported;
+
+	flags = 0;
+	if (mode & PHY_MODE_TX_DISABLED)
+		flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN);
+	if (mode & PHY_MODE_LOW_POWER)
+		flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN);
+	if (mode & PHY_MODE_OFF)
+		flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN);
+
+	return flags;
+}
+
+static u32 mcdi_to_ethtool_media(u32 media)
+{
+	switch (media) {
+	case MC_CMD_MEDIA_XAUI:
+	case MC_CMD_MEDIA_CX4:
+	case MC_CMD_MEDIA_KX4:
+		return PORT_OTHER;
+
+	case MC_CMD_MEDIA_XFP:
+	case MC_CMD_MEDIA_SFP_PLUS:
+		return PORT_FIBRE;
+
+	case MC_CMD_MEDIA_BASE_T:
+		return PORT_TP;
+
+	default:
+		return PORT_OTHER;
+	}
+}
+
+static int efx_mcdi_phy_probe(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg;
+	int rc;
+
+	/* TODO: Move phy_data initialisation to
+	 * phy_op->probe/remove, rather than init/fini */
+	phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
+	if (phy_cfg == NULL) {
+		rc = -ENOMEM;
+		goto fail_alloc;
+	}
+	rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
+	if (rc != 0)
+		goto fail;
+
+	efx->phy_type = phy_cfg->type;
+
+	efx->mdio_bus = phy_cfg->channel;
+	efx->mdio.prtad = phy_cfg->port;
+	efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+	efx->mdio.mode_support = 0;
+	if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+		efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+	if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+		efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+	/* Assert that we can map efx -> mcdi loopback modes */
+	BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+	BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+	BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+	BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+	BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+	BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+	BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+	BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+	BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+	BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+	BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+	BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+	BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+	BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+	BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+	BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+	BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+	BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+	BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+	BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+	BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+	BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+	BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+	BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+	BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+	BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+	BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+	rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+	if (rc != 0)
+		goto fail;
+	/* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+	 * but by convention we don't include it in loopback_modes */
+	efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+	kfree(phy_cfg);
+
+	return 0;
+
+fail:
+	kfree(phy_cfg);
+fail_alloc:
+	return rc;
+}
+
+static int efx_mcdi_phy_init(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_cfg *phy_data;
+	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	u32 caps;
+	int rc;
+
+	phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+	if (phy_data == NULL)
+		return -ENOMEM;
+
+	rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+	if (rc != 0)
+		goto fail;
+
+	efx->phy_data = phy_data;
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		goto fail;
+
+	caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+	if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		efx->link_advertising =
+			mcdi_to_ethtool_cap(phy_data->media, caps);
+	else
+		phy_data->forced_cap = caps;
+
+	return 0;
+
+fail:
+	kfree(phy_data);
+	return rc;
+}
+
+int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	u32 caps = (efx->link_advertising ?
+		    ethtool_to_mcdi_cap(efx->link_advertising) :
+		    phy_cfg->forced_cap);
+
+	return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+				 efx->loopback_mode, 0);
+}
+
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+			      struct efx_link_state *link_state,
+			      u32 speed, u32 flags, u32 fcntl)
+{
+	switch (fcntl) {
+	case MC_CMD_FCNTL_AUTO:
+		WARN_ON(1);	/* This is not a link mode */
+		link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+		break;
+	case MC_CMD_FCNTL_BIDIR:
+		link_state->fc = EFX_FC_TX | EFX_FC_RX;
+		break;
+	case MC_CMD_FCNTL_RESPOND:
+		link_state->fc = EFX_FC_RX;
+		break;
+	default:
+		WARN_ON(1);
+	case MC_CMD_FCNTL_OFF:
+		link_state->fc = 0;
+		break;
+	}
+
+	link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
+	link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN));
+	link_state->speed = speed;
+}
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case
+ */
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	u32 rmtadv;
+
+	/* The link partner capabilities are only relevant if the
+	 * link supports flow control autonegotiation */
+	if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+		return;
+
+	/* If flow control autoneg is supported and enabled, then fine */
+	if (efx->wanted_fc & EFX_FC_AUTO)
+		return;
+
+	rmtadv = 0;
+	if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+		rmtadv |= ADVERTISED_Pause;
+	if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+		rmtadv |=  ADVERTISED_Asym_Pause;
+
+	if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+		EFX_ERR(efx, "warning: link partner doesn't support "
+			"pause frames");
+}
+
+static bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+	struct efx_link_state old_state = efx->link_state;
+	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	int rc;
+
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc) {
+		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+		efx->link_state.up = false;
+	} else {
+		efx_mcdi_phy_decode_link(
+			efx, &efx->link_state,
+			MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+			MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+			MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+	}
+
+	return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static void efx_mcdi_phy_fini(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_cfg *phy_data = efx->phy_data;
+
+	efx->phy_data = NULL;
+	kfree(phy_data);
+}
+
+static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	int rc;
+
+	ecmd->supported =
+		mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
+	ecmd->advertising = efx->link_advertising;
+	ecmd->speed = efx->link_state.speed;
+	ecmd->duplex = efx->link_state.fd;
+	ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
+	ecmd->phy_address = phy_cfg->port;
+	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+	ecmd->mdio_support = (efx->mdio.mode_support &
+			      (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc) {
+		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+		return;
+	}
+	ecmd->lp_advertising =
+		mcdi_to_ethtool_cap(phy_cfg->media,
+				    MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
+}
+
+static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	u32 caps;
+	int rc;
+
+	if (ecmd->autoneg) {
+		caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
+			 1 << MC_CMD_PHY_CAP_AN_LBN);
+	} else if (ecmd->duplex) {
+		switch (ecmd->speed) {
+		case 10:    caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN;    break;
+		case 100:   caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;   break;
+		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;  break;
+		case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+		default:    return -EINVAL;
+		}
+	} else {
+		switch (ecmd->speed) {
+		case 10:    caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN;    break;
+		case 100:   caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN;   break;
+		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN;  break;
+		default:    return -EINVAL;
+		}
+	}
+
+	rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+			       efx->loopback_mode, 0);
+	if (rc)
+		return rc;
+
+	if (ecmd->autoneg) {
+		efx_link_set_advertising(
+			efx, ecmd->advertising | ADVERTISED_Autoneg);
+		phy_cfg->forced_cap = 0;
+	} else {
+		efx_link_set_advertising(efx, 0);
+		phy_cfg->forced_cap = caps;
+	}
+	return 0;
+}
+
+struct efx_phy_operations efx_mcdi_phy_ops = {
+	.probe		= efx_mcdi_phy_probe,
+	.init		= efx_mcdi_phy_init,
+	.reconfigure	= efx_mcdi_phy_reconfigure,
+	.poll		= efx_mcdi_phy_poll,
+	.fini		= efx_mcdi_phy_fini,
+	.get_settings	= efx_mcdi_phy_get_settings,
+	.set_settings	= efx_mcdi_phy_set_settings,
+	.run_tests	= NULL,
+	.test_name	= NULL,
+};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 6c33459f9ea9..1574e52f0594 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -14,8 +14,8 @@
 #include <linux/delay.h>
 #include "net_driver.h"
 #include "mdio_10g.h"
-#include "boards.h"
 #include "workarounds.h"
+#include "nic.h"
 
 unsigned efx_mdio_id_oui(u32 id)
 {
@@ -174,7 +174,7 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
 	 * of mmd's */
 	if (LOOPBACK_INTERNAL(efx))
 		return true;
-	else if (efx->loopback_mode == LOOPBACK_NETWORK)
+	else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
 		return false;
 	else if (efx_phy_mode_disabled(efx->phy_mode))
 		return false;
@@ -211,7 +211,7 @@ void efx_mdio_phy_reconfigure(struct efx_nic *efx)
 			  efx->loopback_mode == LOOPBACK_PCS);
 	efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
 			  MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
-			  efx->loopback_mode == LOOPBACK_NETWORK);
+			  efx->loopback_mode == LOOPBACK_PHYXS_WS);
 }
 
 static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
@@ -249,8 +249,6 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
 int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 	struct ethtool_cmd prev;
-	u32 required;
-	int reg;
 
 	efx->phy_op->get_settings(efx, &prev);
 
@@ -266,86 +264,74 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 		return -EINVAL;
 
 	/* Check that PHY supports these settings */
-	if (ecmd->autoneg) {
-		required = SUPPORTED_Autoneg;
-	} else if (ecmd->duplex) {
-		switch (ecmd->speed) {
-		case SPEED_10:  required = SUPPORTED_10baseT_Full;  break;
-		case SPEED_100: required = SUPPORTED_100baseT_Full; break;
-		default:        return -EINVAL;
-		}
-	} else {
-		switch (ecmd->speed) {
-		case SPEED_10:  required = SUPPORTED_10baseT_Half;  break;
-		case SPEED_100: required = SUPPORTED_100baseT_Half; break;
-		default:        return -EINVAL;
-		}
-	}
-	required |= ecmd->advertising;
-	if (required & ~prev.supported)
+	if (!ecmd->autoneg ||
+	    (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
 		return -EINVAL;
 
-	if (ecmd->autoneg) {
-		bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
-			    || EFX_WORKAROUND_13204(efx));
-
-		/* Set up the base page */
-		reg = ADVERTISE_CSMA;
-		if (ecmd->advertising & ADVERTISED_10baseT_Half)
-			reg |= ADVERTISE_10HALF;
-		if (ecmd->advertising & ADVERTISED_10baseT_Full)
-			reg |= ADVERTISE_10FULL;
-		if (ecmd->advertising & ADVERTISED_100baseT_Half)
-			reg |= ADVERTISE_100HALF;
-		if (ecmd->advertising & ADVERTISED_100baseT_Full)
-			reg |= ADVERTISE_100FULL;
-		if (xnp)
-			reg |= ADVERTISE_RESV;
-		else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
-					      ADVERTISED_1000baseT_Full))
-			reg |= ADVERTISE_NPAGE;
-		reg |= mii_advertise_flowctrl(efx->wanted_fc);
-		efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
-
-		/* Set up the (extended) next page if necessary */
-		if (efx->phy_op->set_npage_adv)
-			efx->phy_op->set_npage_adv(efx, ecmd->advertising);
-
-		/* Enable and restart AN */
-		reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
-		reg |= MDIO_AN_CTRL1_ENABLE;
-		if (!(EFX_WORKAROUND_15195(efx) &&
-		      LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
-			reg |= MDIO_AN_CTRL1_RESTART;
-		if (xnp)
-			reg |= MDIO_AN_CTRL1_XNP;
-		else
-			reg &= ~MDIO_AN_CTRL1_XNP;
-		efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
-	} else {
-		/* Disable AN */
-		efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
-				  MDIO_AN_CTRL1_ENABLE, false);
-
-		/* Set the basic control bits */
-		reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
-		reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
-		if (ecmd->speed == SPEED_100)
-			reg |= MDIO_PMA_CTRL1_SPEED100;
-		if (ecmd->duplex)
-			reg |= MDIO_CTRL1_FULLDPLX;
-		efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
-	}
-
+	efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
+	efx_mdio_an_reconfigure(efx);
 	return 0;
 }
 
+/**
+ * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
+ * @efx:		Efx NIC
+ */
+void efx_mdio_an_reconfigure(struct efx_nic *efx)
+{
+	bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full
+		    || EFX_WORKAROUND_13204(efx));
+	int reg;
+
+	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+	/* Set up the base page */
+	reg = ADVERTISE_CSMA;
+	if (efx->link_advertising & ADVERTISED_10baseT_Half)
+		reg |= ADVERTISE_10HALF;
+	if (efx->link_advertising & ADVERTISED_10baseT_Full)
+		reg |= ADVERTISE_10FULL;
+	if (efx->link_advertising & ADVERTISED_100baseT_Half)
+		reg |= ADVERTISE_100HALF;
+	if (efx->link_advertising & ADVERTISED_100baseT_Full)
+		reg |= ADVERTISE_100FULL;
+	if (xnp)
+		reg |= ADVERTISE_RESV;
+	else if (efx->link_advertising & (ADVERTISED_1000baseT_Half |
+					  ADVERTISED_1000baseT_Full))
+		reg |= ADVERTISE_NPAGE;
+	if (efx->link_advertising & ADVERTISED_Pause)
+		reg |= ADVERTISE_PAUSE_CAP;
+	if (efx->link_advertising & ADVERTISED_Asym_Pause)
+		reg |= ADVERTISE_PAUSE_ASYM;
+	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+	/* Set up the (extended) next page if necessary */
+	if (efx->phy_op->set_npage_adv)
+		efx->phy_op->set_npage_adv(efx, efx->link_advertising);
+
+	/* Enable and restart AN */
+	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
+	reg |= MDIO_AN_CTRL1_ENABLE;
+	if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx)))
+		reg |= MDIO_AN_CTRL1_RESTART;
+	if (xnp)
+		reg |= MDIO_AN_CTRL1_XNP;
+	else
+		reg &= ~MDIO_AN_CTRL1_XNP;
+	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
 enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
 {
-	int lpa;
+	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
 
-	if (!(efx->phy_op->mmds & MDIO_DEVS_AN))
+	if (!(efx->wanted_fc & EFX_FC_AUTO))
 		return efx->wanted_fc;
-	lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA);
-	return efx_fc_resolve(efx->wanted_fc, lpa);
+
+	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+	return mii_resolve_flowctrl_fdx(
+		mii_advertise_flowctrl(efx->wanted_fc),
+		efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
 }
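
A note on the simplification above: efx_mdio_get_pause() now leans on the generic MII helpers, mii_advertise_flowctrl() and mii_resolve_flowctrl_fdx(), instead of the driver-private efx_fc_resolve(). To picture what that resolution step does, here is a rough user-space sketch of the standard 802.3 pause resolution table; the macro names and bit values are local to the sketch, not the kernel's, and this is an illustration rather than the helper's actual source.

#include <stdio.h>

/* Illustrative stand-ins for the MII pause advertisement bits and for the
 * resolved flow-control flags; names and values are local to this sketch. */
#define ADV_PAUSE_CAP	0x0400	/* symmetric pause supported */
#define ADV_PAUSE_ASYM	0x0800	/* asymmetric pause supported */
#define FC_TX		0x1
#define FC_RX		0x2

/* Resolve full-duplex flow control from the local and link-partner
 * advertisement words, following the usual 802.3 resolution table. */
static unsigned int resolve_flowctrl_fdx(unsigned int lcladv, unsigned int rmtadv)
{
	if (lcladv & rmtadv & ADV_PAUSE_CAP)
		return FC_TX | FC_RX;		/* both sides symmetric */
	if (lcladv & rmtadv & ADV_PAUSE_ASYM) {
		if (lcladv & ADV_PAUSE_CAP)
			return FC_RX;		/* asymmetric: pause in one direction */
		if (rmtadv & ADV_PAUSE_CAP)
			return FC_TX;		/* asymmetric: pause in the other */
	}
	return 0;				/* no pause frames either way */
}

int main(void)
{
	unsigned int local = ADV_PAUSE_CAP | ADV_PAUSE_ASYM;
	unsigned int partner = ADV_PAUSE_ASYM;	/* e.g. read back from the AN LPA register */
	unsigned int fc = resolve_flowctrl_fdx(local, partner);

	printf("tx pause %s, rx pause %s\n",
	       (fc & FC_TX) ? "on" : "off", (fc & FC_RX) ? "on" : "off");
	return 0;
}

Feeding the wanted flow-control flags in on one side and the link partner's ability word on the other, as the rewritten function does, keeps the policy decision in one well-tested place.
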
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 6b14421a7444..f6ac9503339d 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
  */
 
 #include "efx.h"
-#include "boards.h"
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
@@ -87,6 +86,9 @@ extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
 /* Set (some of) the PHY settings over MDIO */
 extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
 
+/* Push advertising flags and restart autonegotiation */
+extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+
 /* Get pause parameters from AN if available (otherwise return
  * requested pause parameters)
  */
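
For context on the efx_mdio_an_reconfigure() declaration added here: the function builds the autonegotiation base page from the ethtool-style advertising mask kept in efx->link_advertising, bit by bit. The stand-alone sketch below mirrors that shape; the LINK_ADV_* and MII_ADV_* names and their values are invented for the sketch and are not the kernel's ADVERTISED_*/ADVERTISE_* constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative link advertising mask bits (ethtool style)... */
#define LINK_ADV_10HALF		(1u << 0)
#define LINK_ADV_10FULL		(1u << 1)
#define LINK_ADV_100HALF	(1u << 2)
#define LINK_ADV_100FULL	(1u << 3)
#define LINK_ADV_PAUSE		(1u << 4)
#define LINK_ADV_ASYM_PAUSE	(1u << 5)

/* ...and illustrative MII advertisement register (base page) bits. */
#define MII_ADV_CSMA		0x0001
#define MII_ADV_10HALF		0x0020
#define MII_ADV_10FULL		0x0040
#define MII_ADV_100HALF		0x0080
#define MII_ADV_100FULL		0x0100
#define MII_ADV_PAUSE_CAP	0x0400
#define MII_ADV_PAUSE_ASYM	0x0800

/* Translate an advertising mask into a base-page register value, in the
 * same bit-by-bit style as efx_mdio_an_reconfigure(). */
static uint16_t build_base_page(uint32_t advertising)
{
	uint16_t reg = MII_ADV_CSMA;	/* selector field: IEEE 802.3 */

	if (advertising & LINK_ADV_10HALF)
		reg |= MII_ADV_10HALF;
	if (advertising & LINK_ADV_10FULL)
		reg |= MII_ADV_10FULL;
	if (advertising & LINK_ADV_100HALF)
		reg |= MII_ADV_100HALF;
	if (advertising & LINK_ADV_100FULL)
		reg |= MII_ADV_100FULL;
	if (advertising & LINK_ADV_PAUSE)
		reg |= MII_ADV_PAUSE_CAP;
	if (advertising & LINK_ADV_ASYM_PAUSE)
		reg |= MII_ADV_PAUSE_ASYM;
	return reg;
}

int main(void)
{
	uint32_t adv = LINK_ADV_100FULL | LINK_ADV_PAUSE;

	printf("AN base page = 0x%04x\n", build_base_page(adv));
	return 0;
}

In the driver, the computed value is then written to the AN advertisement register over MDIO and autonegotiation is restarted, as the mdio_10g.c hunk above shows.
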
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 820c233c3ea0..3a464529a46b 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,36 +1,80 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation, incorporated herein by reference.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/mtd/mtd.h>
 #include <linux/delay.h>
+#include <linux/rtnetlink.h>
 
 #define EFX_DRIVER_NAME "sfc_mtd"
 #include "net_driver.h"
 #include "spi.h"
 #include "efx.h"
+#include "nic.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
 
 #define EFX_SPI_VERIFY_BUF_LEN 16
+#define EFX_MCDI_CHUNK_LEN 128
 
-struct efx_mtd {
-	const struct efx_spi_device *spi;
+struct efx_mtd_partition {
 	struct mtd_info mtd;
+	union {
+		struct {
+			bool updating;
+			u8 nvram_type;
+			u16 fw_subtype;
+		} mcdi;
+		size_t offset;
+	};
+	const char *type_name;
 	char name[IFNAMSIZ + 20];
 };
 
+struct efx_mtd_ops {
+	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
+		    size_t *retlen, u8 *buffer);
+	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
+	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
+		     size_t *retlen, const u8 *buffer);
+	int (*sync)(struct mtd_info *mtd);
+};
+
+struct efx_mtd {
+	struct list_head node;
+	struct efx_nic *efx;
+	const struct efx_spi_device *spi;
+	const char *name;
+	const struct efx_mtd_ops *ops;
+	size_t n_parts;
+	struct efx_mtd_partition part[0];
+};
+
+#define efx_for_each_partition(part, efx_mtd)			\
+	for ((part) = &(efx_mtd)->part[0];			\
+	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
+	     (part)++)
+
+#define to_efx_mtd_partition(mtd)				\
+	container_of(mtd, struct efx_mtd_partition, mtd)
+
+static int falcon_mtd_probe(struct efx_nic *efx);
+static int siena_mtd_probe(struct efx_nic *efx);
+
 /* SPI utilities */
 
 static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
 {
 	const struct efx_spi_device *spi = efx_mtd->spi;
-	struct efx_nic *efx = spi->efx;
+	struct efx_nic *efx = efx_mtd->efx;
 	u8 status;
 	int rc, i;
 
@@ -39,7 +83,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
 		__set_current_state(uninterruptible ?
 				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
 		schedule_timeout(HZ / 10);
-		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
+		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 				    &status, sizeof(status));
 		if (rc)
 			return rc;
@@ -52,32 +96,35 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
 	return -ETIMEDOUT;
 }
 
-static int efx_spi_unlock(const struct efx_spi_device *spi)
+static int
+efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
 {
 	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
 				SPI_STATUS_BP0);
 	u8 status;
 	int rc;
 
-	rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, &status, sizeof(status));
+	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+			    &status, sizeof(status));
 	if (rc)
 		return rc;
 
 	if (!(status & unlock_mask))
 		return 0; /* already unlocked */
 
-	rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
+	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 	if (rc)
 		return rc;
-	rc = falcon_spi_cmd(spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
 	if (rc)
 		return rc;
 
 	status &= ~unlock_mask;
-	rc = falcon_spi_cmd(spi, SPI_WRSR, -1, &status, NULL, sizeof(status));
+	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+			    NULL, sizeof(status));
 	if (rc)
 		return rc;
-	rc = falcon_spi_wait_write(spi);
+	rc = falcon_spi_wait_write(efx, spi);
 	if (rc)
 		return rc;
 
@@ -87,6 +134,7 @@ static int efx_spi_unlock(const struct efx_spi_device *spi)
 static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
 {
 	const struct efx_spi_device *spi = efx_mtd->spi;
+	struct efx_nic *efx = efx_mtd->efx;
 	unsigned pos, block_len;
 	u8 empty[EFX_SPI_VERIFY_BUF_LEN];
 	u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
@@ -98,13 +146,14 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
 	if (spi->erase_command == 0)
 		return -EOPNOTSUPP;
 
-	rc = efx_spi_unlock(spi);
+	rc = efx_spi_unlock(efx, spi);
 	if (rc)
 		return rc;
-	rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
+	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 	if (rc)
 		return rc;
-	rc = falcon_spi_cmd(spi, spi->erase_command, start, NULL, NULL, 0);
+	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+			    NULL, 0);
 	if (rc)
 		return rc;
 	rc = efx_spi_slow_wait(efx_mtd, false);
@@ -113,7 +162,8 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
 	memset(empty, 0xff, sizeof(empty));
 	for (pos = 0; pos < len; pos += block_len) {
 		block_len = min(len - pos, sizeof(buffer));
-		rc = falcon_spi_read(spi, start + pos, block_len, NULL, buffer);
+		rc = falcon_spi_read(efx, spi, start + pos, block_len,
+				     NULL, buffer);
 		if (rc)
 			return rc;
 		if (memcmp(empty, buffer, block_len))
@@ -130,140 +180,473 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
 
 /* MTD interface */
 
-static int efx_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
-			size_t *retlen, u8 *buffer)
+static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
 {
 	struct efx_mtd *efx_mtd = mtd->priv;
+	int rc;
+
+	rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
+	if (rc == 0) {
+		erase->state = MTD_ERASE_DONE;
+	} else {
+		erase->state = MTD_ERASE_FAILED;
+		erase->fail_addr = 0xffffffff;
+	}
+	mtd_erase_callback(erase);
+	return rc;
+}
+
+static void efx_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mtd *efx_mtd = mtd->priv;
+	struct efx_nic *efx = efx_mtd->efx;
+	int rc;
+
+	rc = efx_mtd->ops->sync(mtd);
+	if (rc)
+		EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
+}
+
+static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
+{
+	int rc;
+
+	for (;;) {
+		rc = del_mtd_device(&part->mtd);
+		if (rc != -EBUSY)
+			break;
+		ssleep(1);
+	}
+	WARN_ON(rc);
+}
+
+static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
+{
+	struct efx_mtd_partition *part;
+
+	efx_for_each_partition(part, efx_mtd)
+		efx_mtd_remove_partition(part);
+	list_del(&efx_mtd->node);
+	kfree(efx_mtd);
+}
+
+static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
+{
+	struct efx_mtd_partition *part;
+
+	efx_for_each_partition(part, efx_mtd)
+		if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
+			snprintf(part->name, sizeof(part->name),
+				 "%s %s:%02x", efx_mtd->efx->name,
+				 part->type_name, part->mcdi.fw_subtype);
+		else
+			snprintf(part->name, sizeof(part->name),
+				 "%s %s", efx_mtd->efx->name,
+				 part->type_name);
+}
+
+static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
+{
+	struct efx_mtd_partition *part;
+
+	efx_mtd->efx = efx;
+
+	efx_mtd_rename_device(efx_mtd);
+
+	efx_for_each_partition(part, efx_mtd) {
+		part->mtd.writesize = 1;
+
+		part->mtd.owner = THIS_MODULE;
+		part->mtd.priv = efx_mtd;
+		part->mtd.name = part->name;
+		part->mtd.erase = efx_mtd_erase;
+		part->mtd.read = efx_mtd->ops->read;
+		part->mtd.write = efx_mtd->ops->write;
+		part->mtd.sync = efx_mtd_sync;
+
+		if (add_mtd_device(&part->mtd))
+			goto fail;
+	}
+
+	list_add(&efx_mtd->node, &efx->mtd_list);
+	return 0;
+
+fail:
+	while (part != &efx_mtd->part[0]) {
+		--part;
+		efx_mtd_remove_partition(part);
+	}
+	/* add_mtd_device() returns 1 if the MTD table is full */
+	return -ENOMEM;
+}
+
+void efx_mtd_remove(struct efx_nic *efx)
+{
+	struct efx_mtd *efx_mtd, *next;
+
+	WARN_ON(efx_dev_registered(efx));
+
+	list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
+		efx_mtd_remove_device(efx_mtd);
+}
+
+void efx_mtd_rename(struct efx_nic *efx)
+{
+	struct efx_mtd *efx_mtd;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(efx_mtd, &efx->mtd_list, node)
+		efx_mtd_rename_device(efx_mtd);
+}
+
+int efx_mtd_probe(struct efx_nic *efx)
+{
+	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+		return siena_mtd_probe(efx);
+	else
+		return falcon_mtd_probe(efx);
+}
+
+/* Implementation of MTD operations for Falcon */
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+			   size_t len, size_t *retlen, u8 *buffer)
+{
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_mtd *efx_mtd = mtd->priv;
 	const struct efx_spi_device *spi = efx_mtd->spi;
-	struct efx_nic *efx = spi->efx;
+	struct efx_nic *efx = efx_mtd->efx;
 	int rc;
 
 	rc = mutex_lock_interruptible(&efx->spi_lock);
 	if (rc)
 		return rc;
-	rc = falcon_spi_read(spi, FALCON_FLASH_BOOTCODE_START + start,
-			     len, retlen, buffer);
+	rc = falcon_spi_read(efx, spi, part->offset + start, len,
+			     retlen, buffer);
 	mutex_unlock(&efx->spi_lock);
 	return rc;
 }
 
-static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
 {
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
 	struct efx_mtd *efx_mtd = mtd->priv;
-	struct efx_nic *efx = efx_mtd->spi->efx;
+	struct efx_nic *efx = efx_mtd->efx;
 	int rc;
 
 	rc = mutex_lock_interruptible(&efx->spi_lock);
 	if (rc)
 		return rc;
-	rc = efx_spi_erase(efx_mtd, FALCON_FLASH_BOOTCODE_START + erase->addr,
-			   erase->len);
+	rc = efx_spi_erase(efx_mtd, part->offset + start, len);
 	mutex_unlock(&efx->spi_lock);
-
-	if (rc == 0) {
-		erase->state = MTD_ERASE_DONE;
-	} else {
-		erase->state = MTD_ERASE_FAILED;
-		erase->fail_addr = 0xffffffff;
-	}
-	mtd_erase_callback(erase);
 	return rc;
 }
 
-static int efx_mtd_write(struct mtd_info *mtd, loff_t start,
-			 size_t len, size_t *retlen, const u8 *buffer)
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+			    size_t len, size_t *retlen, const u8 *buffer)
 {
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
 	struct efx_mtd *efx_mtd = mtd->priv;
 	const struct efx_spi_device *spi = efx_mtd->spi;
-	struct efx_nic *efx = spi->efx;
+	struct efx_nic *efx = efx_mtd->efx;
 	int rc;
 
 	rc = mutex_lock_interruptible(&efx->spi_lock);
 	if (rc)
 		return rc;
-	rc = falcon_spi_write(spi, FALCON_FLASH_BOOTCODE_START + start,
-			      len, retlen, buffer);
+	rc = falcon_spi_write(efx, spi, part->offset + start, len,
+			      retlen, buffer);
 	mutex_unlock(&efx->spi_lock);
 	return rc;
 }
 
-static void efx_mtd_sync(struct mtd_info *mtd)
+static int falcon_mtd_sync(struct mtd_info *mtd)
 {
 	struct efx_mtd *efx_mtd = mtd->priv;
-	struct efx_nic *efx = efx_mtd->spi->efx;
+	struct efx_nic *efx = efx_mtd->efx;
 	int rc;
 
 	mutex_lock(&efx->spi_lock);
 	rc = efx_spi_slow_wait(efx_mtd, true);
 	mutex_unlock(&efx->spi_lock);
+	return rc;
+}
+
+static struct efx_mtd_ops falcon_mtd_ops = {
+	.read	= falcon_mtd_read,
+	.erase	= falcon_mtd_erase,
+	.write	= falcon_mtd_write,
+	.sync	= falcon_mtd_sync,
+};
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+	struct efx_spi_device *spi = efx->spi_flash;
+	struct efx_mtd *efx_mtd;
+	int rc;
+
+	ASSERT_RTNL();
 
+	if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
+		return -ENODEV;
+
+	efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+			  GFP_KERNEL);
+	if (!efx_mtd)
+		return -ENOMEM;
+
+	efx_mtd->spi = spi;
+	efx_mtd->name = "flash";
+	efx_mtd->ops = &falcon_mtd_ops;
+
+	efx_mtd->n_parts = 1;
+	efx_mtd->part[0].mtd.type = MTD_NORFLASH;
+	efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
+	efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+	efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+	efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
+	efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+
+	rc = efx_mtd_probe_device(efx, efx_mtd);
 	if (rc)
-		EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
-	return;
+		kfree(efx_mtd);
+	return rc;
 }
 
-void efx_mtd_remove(struct efx_nic *efx)
+/* Implementation of MTD operations for Siena */
+
+static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
+			  size_t len, size_t *retlen, u8 *buffer)
 {
-	if (efx->spi_flash && efx->spi_flash->mtd) {
-		struct efx_mtd *efx_mtd = efx->spi_flash->mtd;
-		int rc;
-
-		for (;;) {
-			rc = del_mtd_device(&efx_mtd->mtd);
-			if (rc != -EBUSY)
-				break;
-			ssleep(1);
-		}
-		WARN_ON(rc);
-		kfree(efx_mtd);
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_mtd *efx_mtd = mtd->priv;
+	struct efx_nic *efx = efx_mtd->efx;
+	loff_t offset = start;
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk;
+	int rc = 0;
+
+	while (offset < end) {
+		chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+		rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
+					 buffer, chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+		buffer += chunk;
 	}
+out:
+	*retlen = offset - start;
+	return rc;
 }
 
-void efx_mtd_rename(struct efx_nic *efx)
+static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
 {
-	if (efx->spi_flash && efx->spi_flash->mtd) {
-		struct efx_mtd *efx_mtd = efx->spi_flash->mtd;
-		snprintf(efx_mtd->name, sizeof(efx_mtd->name),
-			 "%s sfc_flash_bootrom", efx->name);
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_mtd *efx_mtd = mtd->priv;
+	struct efx_nic *efx = efx_mtd->efx;
+	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk = part->mtd.erasesize;
+	int rc = 0;
+
+	if (!part->mcdi.updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
+		if (rc)
+			goto out;
+		part->mcdi.updating = 1;
+	}
+
+	/* The MCDI interface can in fact do multiple erase blocks at once;
+	 * but erasing may be slow, so we make multiple calls here to avoid
+	 * tripping the MCDI RPC timeout. */
+	while (offset < end) {
+		rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
+					  chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
 	}
+out:
+	return rc;
 }
 
-int efx_mtd_probe(struct efx_nic *efx)
+static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
+			   size_t len, size_t *retlen, const u8 *buffer)
 {
-	struct efx_spi_device *spi = efx->spi_flash;
-	struct efx_mtd *efx_mtd;
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_mtd *efx_mtd = mtd->priv;
+	struct efx_nic *efx = efx_mtd->efx;
+	loff_t offset = start;
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk;
+	int rc = 0;
+
+	if (!part->mcdi.updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
+		if (rc)
+			goto out;
+		part->mcdi.updating = 1;
+	}
 
-	if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
+	while (offset < end) {
+		chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+		rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
+					  buffer, chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+		buffer += chunk;
+	}
+out:
+	*retlen = offset - start;
+	return rc;
+}
+
+static int siena_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_mtd *efx_mtd = mtd->priv;
+	struct efx_nic *efx = efx_mtd->efx;
+	int rc = 0;
+
+	if (part->mcdi.updating) {
+		part->mcdi.updating = 0;
+		rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
+	}
+
+	return rc;
+}
+
+static struct efx_mtd_ops siena_mtd_ops = {
+	.read	= siena_mtd_read,
+	.erase	= siena_mtd_erase,
+	.write	= siena_mtd_write,
+	.sync	= siena_mtd_sync,
+};
+
+struct siena_nvram_type_info {
+	int port;
+	const char *name;
+};
+
+static struct siena_nvram_type_info siena_nvram_types[] = {
+	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO]	= { 0, "sfc_dummy_phy" },
+	[MC_CMD_NVRAM_TYPE_MC_FW]		= { 0, "sfc_mcfw" },
+	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP]	= { 0, "sfc_mcfw_backup" },
+	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0]	= { 0, "sfc_static_cfg" },
+	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1]	= { 1, "sfc_static_cfg" },
+	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0]	= { 0, "sfc_dynamic_cfg" },
+	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1]	= { 1, "sfc_dynamic_cfg" },
+	[MC_CMD_NVRAM_TYPE_EXP_ROM]		= { 0, "sfc_exp_rom" },
+	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0]	= { 0, "sfc_exp_rom_cfg" },
+	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]	= { 1, "sfc_exp_rom_cfg" },
+	[MC_CMD_NVRAM_TYPE_PHY_PORT0]		= { 0, "sfc_phy_fw" },
+	[MC_CMD_NVRAM_TYPE_PHY_PORT1]		= { 1, "sfc_phy_fw" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+				     struct efx_mtd *efx_mtd,
+				     unsigned int part_id,
+				     unsigned int type)
+{
+	struct efx_mtd_partition *part = &efx_mtd->part[part_id];
+	struct siena_nvram_type_info *info;
+	size_t size, erase_size;
+	bool protected;
+	int rc;
+
+	if (type >= ARRAY_SIZE(siena_nvram_types))
 		return -ENODEV;
 
-	efx_mtd = kzalloc(sizeof(*efx_mtd), GFP_KERNEL);
+	info = &siena_nvram_types[type];
+
+	if (info->port != efx_port_num(efx))
+		return -ENODEV;
+
+	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+	if (rc)
+		return rc;
+	if (protected)
+		return -ENODEV; /* hide it */
+
+	part->mcdi.nvram_type = type;
+	part->type_name = info->name;
+
+	part->mtd.type = MTD_NORFLASH;
+	part->mtd.flags = MTD_CAP_NORFLASH;
+	part->mtd.size = size;
+	part->mtd.erasesize = erase_size;
+
+	return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+				     struct efx_mtd *efx_mtd)
+{
+	struct efx_mtd_partition *part;
+	uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
+				 sizeof(uint16_t)];
+	int rc;
+
+	rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
+	if (rc)
+		return rc;
+
+	efx_for_each_partition(part, efx_mtd)
+		part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
+
+	return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+	struct efx_mtd *efx_mtd;
+	int rc = -ENODEV;
+	u32 nvram_types;
+	unsigned int type;
+
+	ASSERT_RTNL();
+
+	rc = efx_mcdi_nvram_types(efx, &nvram_types);
+	if (rc)
+		return rc;
+
+	efx_mtd = kzalloc(sizeof(*efx_mtd) +
+			  hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
+			  GFP_KERNEL);
 	if (!efx_mtd)
 		return -ENOMEM;
 
-	efx_mtd->spi = spi;
-	spi->mtd = efx_mtd;
-
-	efx_mtd->mtd.type = MTD_NORFLASH;
-	efx_mtd->mtd.flags = MTD_CAP_NORFLASH;
-	efx_mtd->mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
-	efx_mtd->mtd.erasesize = spi->erase_size;
-	efx_mtd->mtd.writesize = 1;
-	efx_mtd_rename(efx);
-
-	efx_mtd->mtd.owner = THIS_MODULE;
-	efx_mtd->mtd.priv = efx_mtd;
-	efx_mtd->mtd.name = efx_mtd->name;
-	efx_mtd->mtd.erase = efx_mtd_erase;
-	efx_mtd->mtd.read = efx_mtd_read;
-	efx_mtd->mtd.write = efx_mtd_write;
-	efx_mtd->mtd.sync = efx_mtd_sync;
-
-	if (add_mtd_device(&efx_mtd->mtd)) {
-		kfree(efx_mtd);
-		spi->mtd = NULL;
-		/* add_mtd_device() returns 1 if the MTD table is full */
-		return -ENOMEM;
+	efx_mtd->name = "Siena NVRAM manager";
+
+	efx_mtd->ops = &siena_mtd_ops;
+
+	type = 0;
+	efx_mtd->n_parts = 0;
+
+	while (nvram_types != 0) {
+		if (nvram_types & 1) {
+			rc = siena_mtd_probe_partition(efx, efx_mtd,
+						       efx_mtd->n_parts, type);
+			if (rc == 0)
+				efx_mtd->n_parts++;
+			else if (rc != -ENODEV)
+				goto fail;
+		}
+		type++;
+		nvram_types >>= 1;
 	}
 
-	return 0;
+	rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
+	if (rc)
+		goto fail;
+
+	rc = efx_mtd_probe_device(efx, efx_mtd);
+fail:
+	if (rc)
+		kfree(efx_mtd);
+	return rc;
 }
+
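
The rewritten mtd.c above is built around three related C idioms: a trailing variable-length array (struct efx_mtd ends in part[0]) so one allocation covers the device and all of its partitions, a per-generation method table (falcon_mtd_ops / siena_mtd_ops) selected once at probe time, and container_of() (wrapped as to_efx_mtd_partition()) to get from the embedded mtd_info back to the partition it lives in. A compact user-space sketch of the combined pattern follows; every type and function name in it is invented for illustration and none of it is the driver's real API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the embedded handle that the outside world holds. */
struct mtd_handle {
	const char *name;
	void *priv;			/* back-pointer to the owning device */
};

struct partition {
	struct mtd_handle mtd;		/* embedded, so container_of() works */
	size_t offset;
};

struct flash_ops {
	int (*read)(struct mtd_handle *mtd, size_t start, size_t len);
};

struct flash_dev {
	const struct flash_ops *ops;
	size_t n_parts;
	struct partition parts[];	/* flexible array, sized at alloc time */
};

static int flash_read(struct mtd_handle *mtd, size_t start, size_t len)
{
	struct partition *part = container_of(mtd, struct partition, mtd);

	printf("read %zu bytes at device offset %zu (%s)\n",
	       len, part->offset + start, mtd->name);
	return 0;
}

static const struct flash_ops flash_dev_ops = { .read = flash_read };

int main(void)
{
	size_t n = 2, i;
	struct flash_dev *dev = calloc(1, sizeof(*dev) + n * sizeof(dev->parts[0]));

	if (!dev)
		return 1;
	dev->ops = &flash_dev_ops;
	dev->n_parts = n;
	for (i = 0; i < n; i++) {
		dev->parts[i].mtd.name = i ? "config" : "bootrom";
		dev->parts[i].mtd.priv = dev;
		dev->parts[i].offset = i * 0x8000;
	}

	/* Callers dispatch through the ops table, as the MTD callbacks do. */
	for (i = 0; i < n; i++)
		dev->ops->read(&dev->parts[i].mtd, 16, 64);

	free(dev);
	return 0;
}

The sketch uses a C99 flexible array member where the driver uses the older zero-length part[0] form; the allocation and indexing work the same way.
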
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 298566da638b..34c381f009b7 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2008 Solarflare Communications Inc.
+ * Copyright 2005-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -38,7 +38,7 @@
 #ifndef EFX_DRIVER_NAME
 #define EFX_DRIVER_NAME	"sfc"
 #endif
-#define EFX_DRIVER_VERSION	"2.3"
+#define EFX_DRIVER_VERSION	"3.0"
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -113,6 +113,13 @@ struct efx_special_buffer {
 	int entries;
 };
 
+enum efx_flush_state {
+	FLUSH_NONE,
+	FLUSH_PENDING,
+	FLUSH_FAILED,
+	FLUSH_DONE,
+};
+
 /**
  * struct efx_tx_buffer - An Efx TX buffer
  * @skb: The associated socket buffer.
@@ -189,7 +196,7 @@ struct efx_tx_queue {
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
-	bool flushed;
+	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
@@ -284,7 +291,7 @@ struct efx_rx_queue {
 	struct page *buf_page;
 	dma_addr_t buf_dma_addr;
 	char *buf_data;
-	bool flushed;
+	enum efx_flush_state flushed;
 };
 
 /**
@@ -327,7 +334,7 @@ enum efx_rx_alloc_method {
  * @used_flags: Channel is used by net driver
  * @enabled: Channel enabled indicator
  * @irq: IRQ number (MSI and MSI-X only)
- * @irq_moderation: IRQ moderation value (in us)
+ * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
  * @reset_work: Scheduled reset work thread
@@ -343,9 +350,9 @@ enum efx_rx_alloc_method {
  * @rx_alloc_push_pages: RX allocation method currently in use for pushing
  *	descriptors
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
- * @n_rx_ip_frag_err: Count of RX IP fragment errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
  * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
@@ -373,9 +380,9 @@ struct efx_channel {
 	int rx_alloc_push_pages;
 
 	unsigned n_rx_tobe_disc;
-	unsigned n_rx_ip_frag_err;
 	unsigned n_rx_ip_hdr_chksum_err;
 	unsigned n_rx_tcp_udp_chksum_err;
+	unsigned n_rx_mcast_mismatch;
 	unsigned n_rx_frm_trunc;
 	unsigned n_rx_overlength;
 	unsigned n_skbuff_leaks;
@@ -388,53 +395,29 @@ struct efx_channel {
 
 };
 
-/**
- * struct efx_blinker - S/W LED blinking context
- * @state: Current state - on or off
- * @resubmit: Timer resubmission flag
- * @timer: Control timer for blinking
- */
-struct efx_blinker {
-	bool state;
-	bool resubmit;
-	struct timer_list timer;
+enum efx_led_mode {
+	EFX_LED_OFF	= 0,
+	EFX_LED_ON	= 1,
+	EFX_LED_DEFAULT	= 2
 };
 
+#define STRING_TABLE_LOOKUP(val, member) \
+	((val) < member ## _max) ? member ## _names[val] : "(invalid)"
 
-/**
- * struct efx_board - board information
- * @type: Board model type
- * @major: Major rev. ('A', 'B' ...)
- * @minor: Minor rev. (0, 1, ...)
- * @init: Initialisation function
- * @init_leds: Sets up board LEDs. May be called repeatedly.
- * @set_id_led: Turns the identification LED on or off
- * @blink: Starts/stops blinking
- * @monitor: Board-specific health check function
- * @fini: Cleanup function
- * @blinker: used to blink LEDs in software
- * @hwmon_client: I2C client for hardware monitor
- * @ioexp_client: I2C client for power/port control
- */
-struct efx_board {
-	int type;
-	int major;
-	int minor;
-	int (*init) (struct efx_nic *nic);
-	/* As the LEDs are typically attached to the PHY, LEDs
-	 * have a separate init callback that happens later than
-	 * board init. */
-	void (*init_leds)(struct efx_nic *efx);
-	void (*set_id_led) (struct efx_nic *efx, bool state);
-	int (*monitor) (struct efx_nic *nic);
-	void (*blink) (struct efx_nic *efx, bool start);
-	void (*fini) (struct efx_nic *nic);
-	struct efx_blinker blinker;
-	struct i2c_client *hwmon_client, *ioexp_client;
-};
+extern const char *efx_loopback_mode_names[];
+extern const unsigned int efx_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
+
+extern const char *efx_interrupt_mode_names[];
+extern const unsigned int efx_interrupt_mode_max;
+#define INT_MODE(efx) \
+	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
 
-#define STRING_TABLE_LOOKUP(val, member)	\
-	member ## _names[val]
+extern const char *efx_reset_type_names[];
+extern const unsigned int efx_reset_type_max;
+#define RESET_TYPE(type) \
+	STRING_TABLE_LOOKUP(type, efx_reset_type)
 
 enum efx_int_mode {
 	/* Be careful if altering to correct macro below */
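
The STRING_TABLE_LOOKUP() rework in this hunk pairs each *_names[] array with a *_max count so that an out-of-range value yields "(invalid)" instead of indexing past the table, which the old definition could do. A stand-alone illustration of the same idiom, with made-up names:

#include <stdio.h>

enum widget_mode {
	WIDGET_MODE_OFF,
	WIDGET_MODE_SLOW,
	WIDGET_MODE_FAST,
	WIDGET_MODE_MAX		/* keep last: table size sentinel */
};

static const char *const widget_mode_names[] = {
	[WIDGET_MODE_OFF]  = "off",
	[WIDGET_MODE_SLOW] = "slow",
	[WIDGET_MODE_FAST] = "fast",
};
static const unsigned int widget_mode_max = WIDGET_MODE_MAX;

/* Same shape as the driver's macro: index the table only when the value
 * is in range, otherwise fall back to a fixed marker string. */
#define STRING_TABLE_LOOKUP(val, member) \
	(((val) < member ## _max) ? member ## _names[val] : "(invalid)")

int main(void)
{
	unsigned int mode = WIDGET_MODE_FAST;

	printf("%s\n", STRING_TABLE_LOOKUP(mode, widget_mode));
	mode = 7;	/* out of range: prints "(invalid)", never touches the array */
	printf("%s\n", STRING_TABLE_LOOKUP(mode, widget_mode));
	return 0;
}

Keeping the name table, its bound and the enum together makes it harder for the three to drift apart when new values are added before the *_MAX sentinel.
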
@@ -445,20 +428,7 @@ enum efx_int_mode {
 };
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
-enum phy_type {
-	PHY_TYPE_NONE = 0,
-	PHY_TYPE_TXC43128 = 1,
-	PHY_TYPE_88E1111 = 2,
-	PHY_TYPE_SFX7101 = 3,
-	PHY_TYPE_QT2022C2 = 4,
-	PHY_TYPE_PM8358 = 6,
-	PHY_TYPE_SFT9001A = 8,
-	PHY_TYPE_QT2025C = 9,
-	PHY_TYPE_SFT9001B = 10,
-	PHY_TYPE_MAX	/* Insert any new items before this */
-};
-
-#define EFX_IS10G(efx) ((efx)->link_speed == 10000)
+#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
 
 enum nic_state {
 	STATE_INIT = 0,
@@ -500,73 +470,69 @@ enum efx_fc_type {
 	EFX_FC_AUTO = 4,
 };
 
-/* Supported MAC bit-mask */
-enum efx_mac_type {
-	EFX_GMAC = 1,
-	EFX_XMAC = 2,
+/**
+ * struct efx_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct efx_link_state {
+	bool up;
+	bool fd;
+	enum efx_fc_type fc;
+	unsigned int speed;
 };
 
-static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
-					      unsigned int lpa)
+static inline bool efx_link_state_equal(const struct efx_link_state *left,
+					const struct efx_link_state *right)
 {
-	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
-
-	if (!(wanted_fc & EFX_FC_AUTO))
-		return wanted_fc;
-
-	return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
+	return left->up == right->up && left->fd == right->fd &&
+		left->fc == right->fc && left->speed == right->speed;
 }
 
 /**
  * struct efx_mac_operations - Efx MAC operations table
  * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
  * @update_stats: Update statistics
- * @irq: Hardware MAC event callback. Serialised by the mac_lock
- * @poll: Poll for hardware state. Serialised by the mac_lock
+ * @check_fault: Check fault state. True if fault present.
  */
 struct efx_mac_operations {
-	void (*reconfigure) (struct efx_nic *efx);
+	int (*reconfigure) (struct efx_nic *efx);
 	void (*update_stats) (struct efx_nic *efx);
-	void (*irq) (struct efx_nic *efx);
-	void (*poll) (struct efx_nic *efx);
+	bool (*check_fault)(struct efx_nic *efx);
 };
 
 /**
  * struct efx_phy_operations - Efx PHY operations table
+ * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
+ *	efx->loopback_modes.
  * @init: Initialise PHY
  * @fini: Shut down PHY
  * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
- * @clear_interrupt: Clear down interrupt
- * @blink: Blink LEDs
- * @poll: Poll for hardware state. Serialised by the mac_lock.
+ * @poll: Update @link_state and report whether it changed.
+ *	Serialised by the mac_lock.
  * @get_settings: Get ethtool settings. Serialised by the mac_lock.
  * @set_settings: Set ethtool settings. Serialised by the mac_lock.
  * @set_npage_adv: Set abilities advertised in (Extended) Next Page
  *	(only needed where AN bit is set in mmds)
- * @num_tests: Number of PHY-specific tests/results
- * @test_names: Names of the tests/results
+ * @test_name: Get the name of a PHY-specific test/result
  * @run_tests: Run tests and record results as appropriate.
  *	Flags are the ethtool tests flags.
- * @mmds: MMD presence mask
- * @loopbacks: Supported loopback modes mask
  */
 struct efx_phy_operations {
-	enum efx_mac_type macs;
+	int (*probe) (struct efx_nic *efx);
 	int (*init) (struct efx_nic *efx);
 	void (*fini) (struct efx_nic *efx);
-	void (*reconfigure) (struct efx_nic *efx);
-	void (*clear_interrupt) (struct efx_nic *efx);
-	void (*poll) (struct efx_nic *efx);
+	int (*reconfigure) (struct efx_nic *efx);
+	bool (*poll) (struct efx_nic *efx);
 	void (*get_settings) (struct efx_nic *efx,
 			      struct ethtool_cmd *ecmd);
 	int (*set_settings) (struct efx_nic *efx,
 			     struct ethtool_cmd *ecmd);
 	void (*set_npage_adv) (struct efx_nic *efx, u32);
-	u32 num_tests;
-	const char *const *test_names;
+	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
 	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
-	int mmds;
-	unsigned loopbacks;
 };
 
 /**
@@ -690,36 +656,38 @@ union efx_multicast_hash {
  * @interrupt_mode: Interrupt mode
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
- * @i2c_adap: I2C adapter
- * @board_info: Board-level information
  * @state: Device state flag. Serialised by the rtnl_lock.
  * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
+ * @next_buffer_table: First available buffer table id
  * @n_rx_queues: Number of RX queues
  * @n_channels: Number of channels in use
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
  * @irq_status: Interrupt status buffer
  * @last_irq_cpu: Last CPU to handle interrupt.
  *	This register is written with the SMP processor ID whenever an
  *	interrupt is handled.  It is used by falcon_test_interrupt()
  *	to verify that an interrupt has occurred.
  * @spi_flash: SPI flash device
- *	This field will be %NULL if no flash device is present.
+ *	This field will be %NULL if no flash device is present (or for Siena).
  * @spi_eeprom: SPI EEPROM device
- *	This field will be %NULL if no EEPROM device is present.
+ *	This field will be %NULL if no EEPROM device is present (or for Siena).
  * @spi_lock: SPI bus lock
+ * @mtd_list: List of MTDs attached to the NIC
  * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @nic_data: Hardware dependent state
  * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
  *	@port_inhibited, efx_monitor() and efx_reconfigure_port()
  * @port_enabled: Port enabled indicator.
- *	Serialises efx_stop_all(), efx_start_all(), efx_monitor(),
- *	efx_phy_work(), and efx_mac_work() with kernel interfaces. Safe to read
- *	under any one of the rtnl_lock, mac_lock, or netif_tx_lock, but all
- *	three must be held to modify it.
+ *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
+ *	efx_mac_work() with kernel interfaces. Safe to read under any
+ *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ *	be held to modify it.
  * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
@@ -731,26 +699,23 @@ union efx_multicast_hash {
  *	&struct net_device_stats.
  * @stats_buffer: DMA buffer for statistics
  * @stats_lock: Statistics update lock. Serialises statistics fetches
- * @stats_disable_count: Nest count for disabling statistics fetches
  * @mac_op: MAC interface
  * @mac_address: Permanent MAC address
  * @phy_type: PHY type
- * @phy_lock: PHY access lock
+ * @mdio_lock: MDIO lock
  * @phy_op: PHY interface
  * @phy_data: PHY private data (including PHY-specific stats)
  * @mdio: PHY MDIO interface
+ * @mdio_bus: PHY MDIO bus ID (only used by Siena)
  * @phy_mode: PHY operating mode. Serialised by @mac_lock.
- * @mac_up: MAC link state
- * @link_up: Link status
- * @link_fd: Link is full duplex
- * @link_fc: Actualy flow control flags
- * @link_speed: Link speed (Mbps)
+ * @xmac_poll_required: XMAC link state needs polling
+ * @link_advertising: Autonegotiation advertising flags
+ * @link_state: Current state of the link
  * @n_link_state_changes: Number of times the link has changed state
  * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
  * @multicast_hash: Multicast hash table
  * @wanted_fc: Wanted flow control flags
- * @phy_work: work item for dealing with PHY events
- * @mac_work: work item for dealing with MAC events
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
  * @loopback_mode: Loopback status
  * @loopback_modes: Supported loopback mode bitmask
  * @loopback_selftest: Offline self-test private state
@@ -774,9 +739,6 @@ struct efx_nic {
 	bool irq_rx_adaptive;
 	unsigned int irq_rx_moderation;
 
-	struct i2c_adapter i2c_adap;
-	struct efx_board board_info;
-
 	enum nic_state state;
 	enum reset_type reset_pending;
 
@@ -784,21 +746,29 @@ struct efx_nic {
 	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
 	struct efx_channel channel[EFX_MAX_CHANNELS];
 
+	unsigned next_buffer_table;
 	int n_rx_queues;
 	int n_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
 
+	unsigned int_error_count;
+	unsigned long int_error_expire;
+
 	struct efx_buffer irq_status;
 	volatile signed int last_irq_cpu;
+	unsigned long irq_zero_count;
 
 	struct efx_spi_device *spi_flash;
 	struct efx_spi_device *spi_eeprom;
 	struct mutex spi_lock;
+#ifdef CONFIG_SFC_MTD
+	struct list_head mtd_list;
+#endif
 
 	unsigned n_rx_nodesc_drop_cnt;
 
-	struct falcon_nic_data *nic_data;
+	void *nic_data;
 
 	struct mutex mac_lock;
 	struct work_struct mac_work;
@@ -815,24 +785,21 @@ struct efx_nic {
 	struct efx_mac_stats mac_stats;
 	struct efx_buffer stats_buffer;
 	spinlock_t stats_lock;
-	unsigned int stats_disable_count;
 
 	struct efx_mac_operations *mac_op;
 	unsigned char mac_address[ETH_ALEN];
 
-	enum phy_type phy_type;
-	spinlock_t phy_lock;
-	struct work_struct phy_work;
+	unsigned int phy_type;
+	struct mutex mdio_lock;
 	struct efx_phy_operations *phy_op;
 	void *phy_data;
 	struct mdio_if_info mdio;
+	unsigned int mdio_bus;
 	enum efx_phy_mode phy_mode;
 
-	bool mac_up;
-	bool link_up;
-	bool link_fd;
-	enum efx_fc_type link_fc;
-	unsigned int link_speed;
+	bool xmac_poll_required;
+	u32 link_advertising;
+	struct efx_link_state link_state;
 	unsigned int n_link_state_changes;
 
 	bool promiscuous;
@@ -841,7 +808,7 @@ struct efx_nic {
 
 	atomic_t rx_reset;
 	enum efx_loopback_mode loopback_mode;
-	unsigned int loopback_modes;
+	u64 loopback_modes;
 
 	void *loopback_selftest;
 };
@@ -860,50 +827,95 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
 	return efx_dev_registered(efx) ? efx->name : "";
 }
 
+static inline unsigned int efx_port_num(struct efx_nic *efx)
+{
+	return PCI_FUNC(efx->pci_dev->devfn);
+}
+
 /**
  * struct efx_nic_type - Efx device type definition
- * @mem_bar: Memory BAR number
+ * @probe: Probe the controller
+ * @remove: Free resources allocated by probe()
+ * @init: Initialise the controller
+ * @fini: Shut down the controller
+ * @monitor: Periodic function for polling link state and hardware monitor
+ * @reset: Reset the controller hardware and possibly the PHY.  This will
+ *	be called while the controller is uninitialised.
+ * @probe_port: Probe the MAC and PHY
+ * @remove_port: Free resources allocated by probe_port()
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * @update_stats: Update statistics not provided by event handling
+ * @start_stats: Start the regular fetching of statistics
+ * @stop_stats: Stop the regular fetching of statistics
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @push_irq_moderation: Apply interrupt moderation value
+ * @push_multicast_hash: Apply multicast hash table
+ * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @test_registers: Test read/write functionality of control registers
+ * @test_nvram: Test validity of NVRAM contents
+ * @default_mac_ops: efx_mac_operations to set at startup
+ * @revision: Hardware architecture revision
  * @mem_map_size: Memory BAR mapped size
  * @txd_ptr_tbl_base: TX descriptor ring base address
  * @rxd_ptr_tbl_base: RX descriptor ring base address
  * @buf_tbl_base: Buffer table base address
  * @evq_ptr_tbl_base: Event queue pointer table base address
  * @evq_rptr_tbl_base: Event queue read-pointer table base address
- * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
- * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
- * @evq_size: Event queue size (must be a power of two)
  * @max_dma_mask: Maximum possible DMA mask
- * @tx_dma_mask: TX DMA mask
- * @bug5391_mask: Address mask for bug 5391 workaround
- * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
- * @rx_xon_thresh: RX FIFO XON watermark (bytes)
  * @rx_buffer_padding: Padding added to each RX buffer
  * @max_interrupt_mode: Highest capability interrupt mode supported
  *	from &enum efx_init_mode.
  * @phys_addr_channels: Number of channels with physically addressed
  *	descriptors
+ * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
+ * @offload_features: net_device feature flags for protocol offload
+ *	features implemented in hardware
+ * @reset_world_flags: Flags for additional components covered by
+ *	reset method RESET_TYPE_WORLD
  */
 struct efx_nic_type {
-	unsigned int mem_bar;
+	int (*probe)(struct efx_nic *efx);
+	void (*remove)(struct efx_nic *efx);
+	int (*init)(struct efx_nic *efx);
+	void (*fini)(struct efx_nic *efx);
+	void (*monitor)(struct efx_nic *efx);
+	int (*reset)(struct efx_nic *efx, enum reset_type method);
+	int (*probe_port)(struct efx_nic *efx);
+	void (*remove_port)(struct efx_nic *efx);
+	void (*prepare_flush)(struct efx_nic *efx);
+	void (*update_stats)(struct efx_nic *efx);
+	void (*start_stats)(struct efx_nic *efx);
+	void (*stop_stats)(struct efx_nic *efx);
+	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
+	void (*push_irq_moderation)(struct efx_channel *channel);
+	void (*push_multicast_hash)(struct efx_nic *efx);
+	int (*reconfigure_port)(struct efx_nic *efx);
+	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
+	int (*set_wol)(struct efx_nic *efx, u32 type);
+	void (*resume_wol)(struct efx_nic *efx);
+	int (*test_registers)(struct efx_nic *efx);
+	int (*test_nvram)(struct efx_nic *efx);
+	struct efx_mac_operations *default_mac_ops;
+
+	int revision;
 	unsigned int mem_map_size;
 	unsigned int txd_ptr_tbl_base;
 	unsigned int rxd_ptr_tbl_base;
 	unsigned int buf_tbl_base;
 	unsigned int evq_ptr_tbl_base;
 	unsigned int evq_rptr_tbl_base;
-
-	unsigned int txd_ring_mask;
-	unsigned int rxd_ring_mask;
-	unsigned int evq_size;
 	u64 max_dma_mask;
-	unsigned int tx_dma_mask;
-	unsigned bug5391_mask;
-
-	int rx_xoff_thresh;
-	int rx_xon_thresh;
 	unsigned int rx_buffer_padding;
 	unsigned int max_interrupt_mode;
 	unsigned int phys_addr_channels;
+	unsigned int tx_dc_base;
+	unsigned int rx_dc_base;
+	unsigned long offload_features;
+	u32 reset_world_flags;
 };
 
 /**************************************************************************
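
A note on the struct efx_nic_type changes above: the type table has grown from a bag of constants into a full method table (probe, reset, update_stats, push_irq_moderation and so on), so generic code can call efx->type->probe() or efx->type->reset() without caring whether the hardware is a Falcon or a Siena. The sketch below shows that dispatch style in miniature; the nic/nic_type names and functions are invented for illustration, not the driver's own.

#include <stdio.h>

struct nic;

/* Per-generation method table, in the spirit of struct efx_nic_type. */
struct nic_type {
	const char *name;
	int revision;
	int (*probe)(struct nic *nic);
	void (*push_irq_moderation)(struct nic *nic, unsigned int ticks);
};

struct nic {
	const struct nic_type *type;
	unsigned int irq_ticks;
};

static int gen_a_probe(struct nic *nic)
{
	printf("probing %s (rev %d)\n", nic->type->name, nic->type->revision);
	return 0;
}

static void gen_a_push_irq_moderation(struct nic *nic, unsigned int ticks)
{
	nic->irq_ticks = ticks;
	printf("%s: IRQ moderation set to %u ticks\n", nic->type->name, ticks);
}

static const struct nic_type gen_a_type = {
	.name			= "gen-a",
	.revision		= 0,
	.probe			= gen_a_probe,
	.push_irq_moderation	= gen_a_push_irq_moderation,
};

int main(void)
{
	struct nic nic = { .type = &gen_a_type };

	/* Generic code only ever goes through the method table. */
	if (nic.type->probe(&nic) == 0)
		nic.type->push_irq_moderation(&nic, 48);
	return 0;
}

Adding another hardware generation then means filling in one more nic_type instance rather than scattering revision checks through the common paths, which is the direction the Falcon/Siena split in this diff takes.
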
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
new file mode 100644
index 000000000000..a577be227862
--- /dev/null
+++ b/drivers/net/sfc/nic.c
@@ -0,0 +1,1583 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason.  In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding).  This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* RX FIFO XOFF watermark
+ *
+ * When the amount of the RX FIFO used increases past this
+ * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
+ * This also has an effect on RX/TX arbitration
+ */
+int efx_nic_rx_xoff_thresh = -1;
+module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
+MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
+
+/* RX FIFO XON watermark
+ *
+ * When the amount of the RX FIFO used decreases below this
+ * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
+ * This also has an effect on RX/TX arbitration
+ */
+int efx_nic_rx_xon_thresh = -1;
+module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
+MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
+ */
+#define EFX_FLUSH_INTERVAL 10
+#define EFX_FLUSH_POLL_COUNT 100
+
+/* Size and alignment of special buffers (4KB) */
+#define EFX_BUF_SIZE 4096
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/**************************************************************************
+ *
+ * Solarstorm hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+				     unsigned int index)
+{
+	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+			value, index);
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+				     unsigned int index)
+{
+	return (((efx_qword_t *) (channel->eventq.addr)) + index);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones.  We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords.  This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+				     const efx_oword_t *mask)
+{
+	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_nic_test_registers(struct efx_nic *efx,
+			   const struct efx_nic_register_test *regs,
+			   size_t n_regs)
+{
+	unsigned address = 0, i, j;
+	efx_oword_t mask, imask, original, reg, buf;
+
+	/* Falcon should be in loopback to isolate the XMAC from the PHY */
+	WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+	for (i = 0; i < n_regs; ++i) {
+		address = regs[i].address;
+		mask = imask = regs[i].mask;
+		EFX_INVERT_OWORD(imask);
+
+		efx_reado(efx, &original, address);
+
+		/* bit sweep on and off */
+		for (j = 0; j < 128; j++) {
+			if (!EFX_EXTRACT_OWORD32(mask, j, j))
+				continue;
+
+			/* Test this testable bit can be set in isolation */
+			EFX_AND_OWORD(reg, original, mask);
+			EFX_SET_OWORD32(reg, j, j, 1);
+
+			efx_writeo(efx, &reg, address);
+			efx_reado(efx, &buf, address);
+
+			if (efx_masked_compare_oword(&reg, &buf, &mask))
+				goto fail;
+
+			/* Test this testable bit can be cleared in isolation */
+			EFX_OR_OWORD(reg, original, mask);
+			EFX_SET_OWORD32(reg, j, j, 0);
+
+			efx_writeo(efx, &reg, address);
+			efx_reado(efx, &buf, address);
+
+			if (efx_masked_compare_oword(&reg, &buf, &mask))
+				goto fail;
+		}
+
+		efx_writeo(efx, &original, address);
+	}
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+	return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+	efx_qword_t buf_desc;
+	int index;
+	dma_addr_t dma_addr;
+	int i;
+
+	EFX_BUG_ON_PARANOID(!buffer->addr);
+
+	/* Write buffer descriptors to NIC */
+	for (i = 0; i < buffer->entries; i++) {
+		index = buffer->index + i;
+		dma_addr = buffer->dma_addr + (i * 4096);
+		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
+			index, (unsigned long long)dma_addr);
+		EFX_POPULATE_QWORD_3(buf_desc,
+				     FRF_AZ_BUF_ADR_REGION, 0,
+				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+		efx_write_buf_tbl(efx, &buf_desc, index);
+	}
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+	efx_oword_t buf_tbl_upd;
+	unsigned int start = buffer->index;
+	unsigned int end = (buffer->index + buffer->entries - 1);
+
+	if (!buffer->entries)
+		return;
+
+	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
+		buffer->index, buffer->index + buffer->entries - 1);
+
+	EFX_POPULATE_OWORD_4(buf_tbl_upd,
+			     FRF_AZ_BUF_UPD_CMD, 0,
+			     FRF_AZ_BUF_CLR_CMD, 1,
+			     FRF_AZ_BUF_CLR_END_ID, end,
+			     FRF_AZ_BUF_CLR_START_ID, start);
+	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range.  It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+				    struct efx_special_buffer *buffer,
+				    unsigned int len)
+{
+	len = ALIGN(len, EFX_BUF_SIZE);
+
+	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
+					    &buffer->dma_addr);
+	if (!buffer->addr)
+		return -ENOMEM;
+	buffer->len = len;
+	buffer->entries = len / EFX_BUF_SIZE;
+	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
+
+	/* All zeros is a potentially valid event so memset to 0xff */
+	memset(buffer->addr, 0xff, len);
+
+	/* Select new buffer ID */
+	buffer->index = efx->next_buffer_table;
+	efx->next_buffer_table += buffer->entries;
+
+	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
+		"(virt %p phys %llx)\n", buffer->index,
+		buffer->index + buffer->entries - 1,
+		(u64)buffer->dma_addr, len,
+		buffer->addr, (u64)virt_to_phys(buffer->addr));
+
+	return 0;
+}
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+	if (!buffer->addr)
+		return;
+
+	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
+		"(virt %p phys %llx)\n", buffer->index,
+		buffer->index + buffer->entries - 1,
+		(u64)buffer->dma_addr, buffer->len,
+		buffer->addr, (u64)virt_to_phys(buffer->addr));
+
+	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
+			    buffer->dma_addr);
+	buffer->addr = NULL;
+	buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status and MAC stats
+ *
+ **************************************************************************/
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			 unsigned int len)
+{
+	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
+					    &buffer->dma_addr);
+	if (!buffer->addr)
+		return -ENOMEM;
+	buffer->len = len;
+	memset(buffer->addr, 0, len);
+	return 0;
+}
+
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+	if (buffer->addr) {
+		pci_free_consistent(efx->pci_dev, buffer->len,
+				    buffer->addr, buffer->dma_addr);
+		buffer->addr = NULL;
+	}
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+	unsigned write_ptr;
+	efx_dword_t reg;
+
+	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+	efx_writed_page(tx_queue->efx, &reg,
+			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+
+	struct efx_tx_buffer *buffer;
+	efx_qword_t *txd;
+	unsigned write_ptr;
+
+	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+	do {
+		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+		buffer = &tx_queue->buffer[write_ptr];
+		txd = efx_tx_desc(tx_queue, write_ptr);
+		++tx_queue->write_count;
+
+		/* Create TX descriptor ring entry */
+		EFX_POPULATE_QWORD_4(*txd,
+				     FSF_AZ_TX_KER_CONT, buffer->continuation,
+				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+				     FSF_AZ_TX_KER_BUF_REGION, 0,
+				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+	} while (tx_queue->write_count != tx_queue->insert_count);
+
+	wmb(); /* Ensure descriptors are written before they are fetched */
+	efx_notify_tx_desc(tx_queue);
+}
+
+/* Allocate hardware resources for a TX queue */
+int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
+		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
+	return efx_alloc_special_buffer(efx, &tx_queue->txd,
+					EFX_TXQ_SIZE * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+	efx_oword_t tx_desc_ptr;
+	struct efx_nic *efx = tx_queue->efx;
+
+	tx_queue->flushed = FLUSH_NONE;
+
+	/* Pin TX descriptor ring */
+	efx_init_special_buffer(efx, &tx_queue->txd);
+
+	/* Push TX descriptor ring to card */
+	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+			      FRF_AZ_TX_DESCQ_EN, 1,
+			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+			      FRF_AZ_TX_DESCQ_EVQ_ID,
+			      tx_queue->channel->channel,
+			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+			      FRF_AZ_TX_DESCQ_SIZE,
+			      __ffs(tx_queue->txd.entries),
+			      FRF_AZ_TX_DESCQ_TYPE, 0,
+			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+				    !csum);
+	}
+
+	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+			 tx_queue->queue);
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+		efx_oword_t reg;
+
+		/* Only 128 bits in this register */
+		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
+
+		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
+			clear_bit_le(tx_queue->queue, (void *)&reg);
+		else
+			set_bit_le(tx_queue->queue, (void *)&reg);
+		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+	}
+}
+
+static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t tx_flush_descq;
+
+	tx_queue->flushed = FLUSH_PENDING;
+
+	/* Post a flush command */
+	EFX_POPULATE_OWORD_2(tx_flush_descq,
+			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t tx_desc_ptr;
+
+	/* The queue should have been flushed */
+	WARN_ON(tx_queue->flushed != FLUSH_DONE);
+
+	/* Remove TX descriptor ring from card */
+	EFX_ZERO_OWORD(tx_desc_ptr);
+	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+			 tx_queue->queue);
+
+	/* Unpin TX descriptor ring */
+	efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+	struct efx_rx_buffer *rx_buf;
+	efx_qword_t *rxd;
+
+	rxd = efx_rx_desc(rx_queue, index);
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	EFX_POPULATE_QWORD_3(*rxd,
+			     FSF_AZ_RX_KER_BUF_SIZE,
+			     rx_buf->len -
+			     rx_queue->efx->type->rx_buffer_padding,
+			     FSF_AZ_RX_KER_BUF_REGION, 0,
+			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This builds descriptors for any buffers added since the last call, and
+ * then writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+	efx_dword_t reg;
+	unsigned write_ptr;
+
+	while (rx_queue->notified_count != rx_queue->added_count) {
+		efx_build_rx_desc(rx_queue,
+				  rx_queue->notified_count &
+				  EFX_RXQ_MASK);
+		++rx_queue->notified_count;
+	}
+
+	wmb();
+	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+	efx_writed_page(rx_queue->efx, &reg,
+			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
+}
+
+int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
+		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
+	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+					EFX_RXQ_SIZE * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+	efx_oword_t rx_desc_ptr;
+	struct efx_nic *efx = rx_queue->efx;
+	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+	bool iscsi_digest_en = is_b0;
+
+	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
+		rx_queue->queue, rx_queue->rxd.index,
+		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+	rx_queue->flushed = FLUSH_NONE;
+
+	/* Pin RX descriptor ring */
+	efx_init_special_buffer(efx, &rx_queue->rxd);
+
+	/* Push RX descriptor ring to card */
+	EFX_POPULATE_OWORD_10(rx_desc_ptr,
+			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+			      FRF_AZ_RX_DESCQ_EVQ_ID,
+			      rx_queue->channel->channel,
+			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
+			      FRF_AZ_RX_DESCQ_SIZE,
+			      __ffs(rx_queue->rxd.entries),
+			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
+			      /* For >=B0 this is scatter so disable */
+			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+			      FRF_AZ_RX_DESCQ_EN, 1);
+	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+			 rx_queue->queue);
+}
+
+static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	efx_oword_t rx_flush_descq;
+
+	rx_queue->flushed = FLUSH_PENDING;
+
+	/* Post a flush command */
+	EFX_POPULATE_OWORD_2(rx_flush_descq,
+			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
+	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
+{
+	efx_oword_t rx_desc_ptr;
+	struct efx_nic *efx = rx_queue->efx;
+
+	/* The queue should already have been flushed */
+	WARN_ON(rx_queue->flushed != FLUSH_DONE);
+
+	/* Remove RX descriptor ring from card */
+	EFX_ZERO_OWORD(rx_desc_ptr);
+	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+			 rx_queue->queue);
+
+	/* Unpin RX descriptor ring */
+	efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ *
+ * Note that EVQ_RPTR_REG contains the index of the "last read" event,
+ * whereas channel->eventq_read_ptr contains the index of the "next to
+ * read" event.
+ */
+void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+	efx_dword_t reg;
+	struct efx_nic *efx = channel->efx;
+
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
+			 channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	efx_oword_t drv_ev_reg;
+
+	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+	drv_ev_reg.u32[0] = event->u32[0];
+	drv_ev_reg.u32[1] = event->u32[1];
+	drv_ev_reg.u32[2] = 0;
+	drv_ev_reg.u32[3] = 0;
+	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
+	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static void
+efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	unsigned int tx_ev_desc_ptr;
+	unsigned int tx_ev_q_label;
+	struct efx_tx_queue *tx_queue;
+	struct efx_nic *efx = channel->efx;
+
+	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+		/* Transmit completion */
+		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+		tx_queue = &efx->tx_queue[tx_ev_q_label];
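+		/* Credit the completed descriptors towards adaptive IRQ
+		 * moderation */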
+		channel->irq_mod_score +=
+			(tx_ev_desc_ptr - tx_queue->read_count) &
+			EFX_TXQ_MASK;
+		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+		/* Rewrite the FIFO write pointer */
+		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+		tx_queue = &efx->tx_queue[tx_ev_q_label];
+
+		if (efx_dev_registered(efx))
+			netif_tx_lock(efx->net_dev);
+		efx_notify_tx_desc(tx_queue);
+		if (efx_dev_registered(efx))
+			netif_tx_unlock(efx->net_dev);
+	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
+		   EFX_WORKAROUND_10727(efx)) {
+		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+	} else {
+		EFX_ERR(efx, "channel %d unexpected TX event "
+			EFX_QWORD_FMT"\n", channel->channel,
+			EFX_QWORD_VAL(*event));
+	}
+}
+
+/* Detect errors included in the rx_ev_pkt_ok bit. */
+static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+				 const efx_qword_t *event,
+				 bool *rx_ev_pkt_ok,
+				 bool *discard)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+	bool rx_ev_other_err, rx_ev_pause_frm;
+	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+	unsigned rx_ev_pkt_type;
+
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+	/* Every error apart from tobe_disc and pause_frm */
+	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+	/* Count errors that are not in MAC stats.  Ignore expected
+	 * checksum errors during self-test. */
+	if (rx_ev_frm_trunc)
+		++rx_queue->channel->n_rx_frm_trunc;
+	else if (rx_ev_tobe_disc)
+		++rx_queue->channel->n_rx_tobe_disc;
+	else if (!efx->loopback_selftest) {
+		if (rx_ev_ip_hdr_chksum_err)
+			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
+		else if (rx_ev_tcp_udp_chksum_err)
+			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+	}
+
+	/* The frame must be discarded if any of these are true. */
+	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+		    rx_ev_tobe_disc | rx_ev_pause_frm);
+
+	/* TOBE_DISC is expected on unicast mismatches; don't print out an
+	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
+	 * to a FIFO overflow.
+	 */
+#ifdef EFX_ENABLE_DEBUG
+	if (rx_ev_other_err) {
+		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
+			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+			    rx_queue->queue, EFX_QWORD_VAL(*event),
+			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+			    rx_ev_ip_hdr_chksum_err ?
+			    " [IP_HDR_CHKSUM_ERR]" : "",
+			    rx_ev_tcp_udp_chksum_err ?
+			    " [TCP_UDP_CHKSUM_ERR]" : "",
+			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+			    rx_ev_pause_frm ? " [PAUSE]" : "");
+	}
+#endif
+}
+
+/* Handle receive events that are not in-order. */
+static void
+efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned expected, dropped;
+
+	expected = rx_queue->removed_count & EFX_RXQ_MASK;
+	dropped = (index - expected) & EFX_RXQ_MASK;
+	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
+		dropped, index, expected);
+
+	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+}
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address.  The "is multicast" and "matches multicast
+ * filter" flags can likewise be used to discard non-matching multicast
+ * packets.
+ */
+static void
+efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+	unsigned expected_ptr;
+	bool rx_ev_pkt_ok, discard = false, checksummed;
+	struct efx_rx_queue *rx_queue;
+	struct efx_nic *efx = channel->efx;
+
+	/* Basic packet information */
+	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
+	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+		channel->channel);
+
+	rx_queue = &efx->rx_queue[channel->channel];
+
+	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
+	if (unlikely(rx_ev_desc_ptr != expected_ptr))
+		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+
+	if (likely(rx_ev_pkt_ok)) {
+		/* If packet is marked as OK and packet type is TCP/IP or
+		 * UDP/IP, then we can rely on the hardware checksum.
+		 */
+		checksummed =
+			likely(efx->rx_checksum_enabled) &&
+			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
+	} else {
+		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
+		checksummed = false;
+	}
+
+	/* Detect multicast packets that didn't match the filter */
+	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+	if (rx_ev_mcast_pkt) {
+		unsigned int rx_ev_mcast_hash_match =
+			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+		if (unlikely(!rx_ev_mcast_hash_match)) {
+			++channel->n_rx_mcast_mismatch;
+			discard = true;
+		}
+	}
+
+	channel->irq_mod_score += 2;
+
+	/* Handle received packet */
+	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
+		      checksummed, discard);
+}
+
+/* Global events are basically PHY events */
+static void
+efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	bool handled = false;
+
+	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
+		/* Ignored */
+		handled = true;
+	}
+
+	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
+	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+		efx->xmac_poll_required = true;
+		handled = true;
+	}
+
+	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
+	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+		EFX_ERR(efx, "channel %d seen global RX_RESET "
+			"event. Resetting.\n", channel->channel);
+
+		atomic_inc(&efx->rx_reset);
+		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+		handled = true;
+	}
+
+	if (!handled)
+		EFX_ERR(efx, "channel %d unknown global event "
+			EFX_QWORD_FMT "\n", channel->channel,
+			EFX_QWORD_VAL(*event));
+}
+
+static void
+efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	unsigned int ev_sub_code;
+	unsigned int ev_sub_data;
+
+	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+	switch (ev_sub_code) {
+	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
+			  channel->channel, ev_sub_data);
+		break;
+	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
+			  channel->channel, ev_sub_data);
+		break;
+	case FSE_AZ_EVQ_INIT_DONE_EV:
+		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
+			channel->channel, ev_sub_data);
+		break;
+	case FSE_AZ_SRM_UPD_DONE_EV:
+		EFX_TRACE(efx, "channel %d SRAM update done\n",
+			  channel->channel);
+		break;
+	case FSE_AZ_WAKE_UP_EV:
+		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
+			  channel->channel, ev_sub_data);
+		break;
+	case FSE_AZ_TIMER_EV:
+		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
+			  channel->channel, ev_sub_data);
+		break;
+	case FSE_AA_RX_RECOVER_EV:
+		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
+			"Resetting.\n", channel->channel);
+		atomic_inc(&efx->rx_reset);
+		efx_schedule_reset(efx,
+				   EFX_WORKAROUND_6555(efx) ?
+				   RESET_TYPE_RX_RECOVERY :
+				   RESET_TYPE_DISABLE);
+		break;
+	case FSE_BZ_RX_DSC_ERROR_EV:
+		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
+			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
+		break;
+	case FSE_BZ_TX_DSC_ERROR_EV:
+		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
+			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+		break;
+	default:
+		EFX_TRACE(efx, "channel %d unknown driver event code %d "
+			  "data %04x\n", channel->channel, ev_sub_code,
+			  ev_sub_data);
+		break;
+	}
+}
+
+int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
+{
+	unsigned int read_ptr;
+	efx_qword_t event, *p_event;
+	int ev_code;
+	int rx_packets = 0;
+
+	read_ptr = channel->eventq_read_ptr;
+
+	do {
+		p_event = efx_event(channel, read_ptr);
+		event = *p_event;
+
+		if (!efx_event_present(&event))
+			/* End of events */
+			break;
+
+		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
+			  channel->channel, EFX_QWORD_VAL(event));
+
+		/* Clear this event by marking it all ones */
+		EFX_SET_QWORD(*p_event);
+
+		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+		switch (ev_code) {
+		case FSE_AZ_EV_CODE_RX_EV:
+			efx_handle_rx_event(channel, &event);
+			++rx_packets;
+			break;
+		case FSE_AZ_EV_CODE_TX_EV:
+			efx_handle_tx_event(channel, &event);
+			break;
+		case FSE_AZ_EV_CODE_DRV_GEN_EV:
+			channel->eventq_magic = EFX_QWORD_FIELD(
+				event, FSF_AZ_DRV_GEN_EV_MAGIC);
+			EFX_LOG(channel->efx, "channel %d received generated "
+				"event "EFX_QWORD_FMT"\n", channel->channel,
+				EFX_QWORD_VAL(event));
+			break;
+		case FSE_AZ_EV_CODE_GLOBAL_EV:
+			efx_handle_global_event(channel, &event);
+			break;
+		case FSE_AZ_EV_CODE_DRIVER_EV:
+			efx_handle_driver_event(channel, &event);
+			break;
+		case FSE_CZ_EV_CODE_MCDI_EV:
+			efx_mcdi_process_event(channel, &event);
+			break;
+		default:
+			EFX_ERR(channel->efx, "channel %d unknown event type %d"
+				" (data " EFX_QWORD_FMT ")\n", channel->channel,
+				ev_code, EFX_QWORD_VAL(event));
+		}
+
+		/* Increment read pointer */
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+
+	} while (rx_packets < rx_quota);
+
+	channel->eventq_read_ptr = read_ptr;
+	return rx_packets;
+}
+
+
+/* Allocate buffer table entries for event queue */
+int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
+		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
+	return efx_alloc_special_buffer(efx, &channel->eventq,
+					EFX_EVQ_SIZE * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_eventq(struct efx_channel *channel)
+{
+	efx_oword_t reg;
+	struct efx_nic *efx = channel->efx;
+
+	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
+		channel->channel, channel->eventq.index,
+		channel->eventq.index + channel->eventq.entries - 1);
+
+	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+		EFX_POPULATE_OWORD_3(reg,
+				     FRF_CZ_TIMER_Q_EN, 1,
+				     FRF_CZ_HOST_NOTIFY_MODE, 0,
+				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+	}
+
+	/* Pin event queue buffer */
+	efx_init_special_buffer(efx, &channel->eventq);
+
+	/* Fill event queue with all ones (i.e. empty events) */
+	memset(channel->eventq.addr, 0xff, channel->eventq.len);
+
+	/* Push event queue to card */
+	EFX_POPULATE_OWORD_3(reg,
+			     FRF_AZ_EVQ_EN, 1,
+			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+			 channel->channel);
+
+	efx->type->push_irq_moderation(channel);
+}
+
+void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+	efx_oword_t reg;
+	struct efx_nic *efx = channel->efx;
+
+	/* Remove event queue from card */
+	EFX_ZERO_OWORD(reg);
+	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+			 channel->channel);
+	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+	/* Unpin event queue */
+	efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+	efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+/* Generate a test event on the event queue.  A subsequent call to
+ * efx_nic_process_eventq() should pick up the event and place the value
+ * of "magic" into channel->eventq_magic.
+ */
+void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
+{
+	efx_qword_t test_event;
+
+	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+			     FSE_AZ_EV_CODE_DRV_GEN_EV,
+			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+	efx_generate_event(channel, &test_event);
+}
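+
+/* A minimal usage sketch (the magic value and the waiting step shown here
+ * are illustrative; a real caller also waits for the channel's
+ * interrupt/tasklet to run):
+ *
+ *	channel->eventq_magic = 0;
+ *	efx_nic_generate_test_event(channel, 0xfa);
+ *	... wait for the tasklet to call efx_nic_process_eventq() ...
+ *	ok = (channel->eventq_magic == 0xfa);
+ */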
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+
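+/* Each queue's ->flushed field follows a small state machine: FLUSH_NONE
+ * until a flush command is posted, FLUSH_PENDING while it is outstanding,
+ * and FLUSH_DONE or FLUSH_FAILED once the matching flush-done driver event
+ * arrives.  Failed RX flushes are re-issued by efx_nic_flush_queues(). */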
+static void efx_poll_flush_events(struct efx_nic *efx)
+{
+	struct efx_channel *channel = &efx->channel[0];
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	unsigned int read_ptr = channel->eventq_read_ptr;
+	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
+
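+	/* end_ptr sits one slot behind the starting read pointer, so the
+	 * scan below is bounded to a single pass over the event ring */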
+	do {
+		efx_qword_t *event = efx_event(channel, read_ptr);
+		int ev_code, ev_sub_code, ev_queue;
+		bool ev_failed;
+
+		if (!efx_event_present(event))
+			break;
+
+		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
+		ev_sub_code = EFX_QWORD_FIELD(*event,
+					      FSF_AZ_DRIVER_EV_SUBCODE);
+		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   FSF_AZ_DRIVER_EV_SUBDATA);
+			if (ev_queue < EFX_TX_QUEUE_COUNT) {
+				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue->flushed = FLUSH_DONE;
+			}
+		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
+			ev_queue = EFX_QWORD_FIELD(
+				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+			ev_failed = EFX_QWORD_FIELD(
+				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+			if (ev_queue < efx->n_rx_queues) {
+				rx_queue = efx->rx_queue + ev_queue;
+				rx_queue->flushed =
+					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
+			}
+		}
+
+		/* We're about to destroy the queue anyway, so
+		 * it's ok to throw away every non-flush event */
+		EFX_SET_QWORD(*event);
+
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+	} while (read_ptr != end_ptr);
+
+	channel->eventq_read_ptr = read_ptr;
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int efx_nic_flush_queues(struct efx_nic *efx)
+{
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int i, tx_pending, rx_pending;
+
+	/* If necessary prepare the hardware for flushing */
+	efx->type->prepare_flush(efx);
+
+	/* Flush all tx queues in parallel */
+	efx_for_each_tx_queue(tx_queue, efx)
+		efx_flush_tx_queue(tx_queue);
+
+	/* The hardware supports four concurrent rx flushes, each of which may
+	 * need to be retried if there is an outstanding descriptor fetch */
+	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
+		rx_pending = tx_pending = 0;
+		efx_for_each_rx_queue(rx_queue, efx) {
+			if (rx_queue->flushed == FLUSH_PENDING)
+				++rx_pending;
+		}
+		efx_for_each_rx_queue(rx_queue, efx) {
+			if (rx_pending == EFX_RX_FLUSH_COUNT)
+				break;
+			if (rx_queue->flushed == FLUSH_FAILED ||
+			    rx_queue->flushed == FLUSH_NONE) {
+				efx_flush_rx_queue(rx_queue);
+				++rx_pending;
+			}
+		}
+		efx_for_each_tx_queue(tx_queue, efx) {
+			if (tx_queue->flushed != FLUSH_DONE)
+				++tx_pending;
+		}
+
+		if (rx_pending == 0 && tx_pending == 0)
+			return 0;
+
+		msleep(EFX_FLUSH_INTERVAL);
+		efx_poll_flush_events(efx);
+	}
+
+	/* Mark the queues as all flushed. We're going to return failure
+	 * leading to a reset, or fake up success anyway */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		if (tx_queue->flushed != FLUSH_DONE)
+			EFX_ERR(efx, "tx queue %d flush command timed out\n",
+				tx_queue->queue);
+		tx_queue->flushed = FLUSH_DONE;
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		if (rx_queue->flushed != FLUSH_DONE)
+			EFX_ERR(efx, "rx queue %d flush command timed out\n",
+				rx_queue->queue);
+		rx_queue->flushed = FLUSH_DONE;
+	}
+
+	if (EFX_WORKAROUND_7803(efx))
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_nic_interrupts(struct efx_nic *efx,
+				      bool enabled, bool force)
+{
+	efx_oword_t int_en_reg_ker;
+	unsigned int level = 0;
+
+	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+		/* Set the level always even if we're generating a test
+		 * interrupt, because our legacy interrupt handler is safe */
+		level = 0x1f;
+
+	EFX_POPULATE_OWORD_3(int_en_reg_ker,
+			     FRF_AZ_KER_INT_LEVE_SEL, level,
+			     FRF_AZ_KER_INT_KER, force,
+			     FRF_AZ_DRV_INT_EN_KER, enabled);
+	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_nic_enable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+	/* Enable interrupts */
+	efx_nic_interrupts(efx, true, false);
+
+	/* Force processing of all the channels to get the EVQ RPTRs up to
+	 * date */
+	efx_for_each_channel(channel, efx)
+		efx_schedule_channel(channel);
+}
+
+void efx_nic_disable_interrupts(struct efx_nic *efx)
+{
+	/* Disable interrupts */
+	efx_nic_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_nic_generate_interrupt(struct efx_nic *efx)
+{
+	efx_nic_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	efx_oword_t *int_ker = efx->irq_status.addr;
+	efx_oword_t fatal_intr;
+	int error, mem_perr;
+
+	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
+		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+		EFX_OWORD_VAL(fatal_intr),
+		error ? "disabling bus mastering" : "no recognised error");
+	if (error == 0)
+		goto out;
+
+	/* If this is a memory parity error, dump which blocks are offending */
+	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
+	if (mem_perr) {
+		efx_oword_t reg;
+		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
+			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
+	}
+
+	/* Disable both devices */
+	pci_clear_master(efx->pci_dev);
+	if (efx_nic_is_dual_func(efx))
+		pci_clear_master(nic_data->pci_dev2);
+	efx_nic_disable_interrupts(efx);
+
+	/* Count errors and reset or disable the NIC accordingly */
+	if (efx->int_error_count == 0 ||
+	    time_after(jiffies, efx->int_error_expire)) {
+		efx->int_error_count = 0;
+		efx->int_error_expire =
+			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+	}
+	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
+		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+	} else {
+		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
+			"NIC will be disabled\n");
+		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+	}
+out:
+	return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledge the interrupt and schedule event queue processing.
+ */
+static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
+{
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
+	irqreturn_t result = IRQ_NONE;
+	struct efx_channel *channel;
+	efx_dword_t reg;
+	u32 queues;
+	int syserr;
+
+	/* Read the ISR which also ACKs the interrupts */
+	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
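+	/* Each bit of 'queues' flags one interrupting event queue; bit 0
+	 * corresponds to channel 0, matching the loop below */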
+
+	/* Check to see if we have a serious error condition */
+	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+	if (unlikely(syserr))
+		return efx_nic_fatal_interrupt(efx);
+
+	if (queues != 0) {
+		if (EFX_WORKAROUND_15783(efx))
+			efx->irq_zero_count = 0;
+
+		/* Schedule processing of any interrupting queues */
+		efx_for_each_channel(channel, efx) {
+			if (queues & 1)
+				efx_schedule_channel(channel);
+			queues >>= 1;
+		}
+		result = IRQ_HANDLED;
+
+	} else if (EFX_WORKAROUND_15783(efx) &&
+		   efx->irq_zero_count++ == 0) {
+		efx_qword_t *event;
+
+		/* Ensure we rearm all event queues */
+		efx_for_each_channel(channel, efx) {
+			event = efx_event(channel, channel->eventq_read_ptr);
+			if (efx_event_present(event))
+				efx_schedule_channel(channel);
+		}
+
+		result = IRQ_HANDLED;
+	}
+
+	if (result == IRQ_HANDLED) {
+		efx->last_irq_cpu = raw_smp_processor_id();
+		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+	}
+
+	return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt.  This routine schedules event
+ * queue processing.  No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
+{
+	struct efx_channel *channel = dev_id;
+	struct efx_nic *efx = channel->efx;
+	efx_oword_t *int_ker = efx->irq_status.addr;
+	int syserr;
+
+	efx->last_irq_cpu = raw_smp_processor_id();
+	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+	/* Check to see if we have a serious error condition */
+	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+	if (unlikely(syserr))
+		return efx_nic_fatal_interrupt(efx);
+
+	/* Schedule processing of the channel */
+	efx_schedule_channel(channel);
+
+	return IRQ_HANDLED;
+}
+
+
+/* Set up the RSS indirection table.
+ * This maps from the hash value of the packet to an RX queue.
+ */
+static void efx_setup_rss_indir_table(struct efx_nic *efx)
+{
+	int i = 0;
+	unsigned long offset;
+	efx_dword_t dword;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+		return;
+
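+	/* The table holds 0x800 / 0x10 = 128 entries; spread them round-robin
+	 * across the RX queues */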
+	for (offset = FR_BZ_RX_INDIRECTION_TBL;
+	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
+	     offset += 0x10) {
+		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+				     i % efx->n_rx_queues);
+		efx_writed(efx, &dword, offset);
+		i++;
+	}
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_nic_init_interrupt(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	int rc;
+
+	if (!EFX_INT_MODE_USE_MSI(efx)) {
+		irq_handler_t handler;
+		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+			handler = efx_legacy_interrupt;
+		else
+			handler = falcon_legacy_interrupt_a1;
+
+		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+				 efx->name, efx);
+		if (rc) {
+			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
+				efx->pci_dev->irq);
+			goto fail1;
+		}
+		return 0;
+	}
+
+	/* Hook MSI or MSI-X interrupt */
+	efx_for_each_channel(channel, efx) {
+		rc = request_irq(channel->irq, efx_msi_interrupt,
+				 IRQF_PROBE_SHARED, /* Not shared */
+				 channel->name, channel);
+		if (rc) {
+			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
+			goto fail2;
+		}
+	}
+
+	return 0;
+
+ fail2:
+	efx_for_each_channel(channel, efx)
+		free_irq(channel->irq, channel);
+ fail1:
+	return rc;
+}
+
+void efx_nic_fini_interrupt(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	efx_oword_t reg;
+
+	/* Disable MSI/MSI-X interrupts */
+	efx_for_each_channel(channel, efx) {
+		if (channel->irq)
+			free_irq(channel->irq, channel);
+	}
+
+	/* ACK legacy interrupt */
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
+	else
+		falcon_irq_ack_a1(efx);
+
+	/* Disable legacy interrupt */
+	if (efx->legacy_irq)
+		free_irq(efx->legacy_irq, efx);
+}
+
+u32 efx_nic_fpga_ver(struct efx_nic *efx)
+{
+	efx_oword_t altera_build;
+	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_nic_init_common(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+
+	/* Set positions of descriptor caches in SRAM. */
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
+			     efx->type->tx_dc_base / 8);
+	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
+			     efx->type->rx_dc_base / 8);
+	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+	/* Set TX descriptor cache size. */
+	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+	/* Set RX descriptor cache size.  Set low watermark to size-8, as
+	 * this allows most efficient prefetching.
+	 */
+	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+	/* Program INT_KER address */
+	EFX_POPULATE_OWORD_2(temp,
+			     FRF_AZ_NORM_INT_VEC_DIS_KER,
+			     EFX_INT_MODE_USE_MSI(efx),
+			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+	/* Enable all the genuinely fatal interrupts.  (They are still
+	 * masked by the overall interrupt mask, controlled by
+	 * falcon_interrupts()).
+	 *
+	 * Note: All other fatal interrupts are enabled
+	 */
+	EFX_POPULATE_OWORD_3(temp,
+			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+	EFX_INVERT_OWORD(temp);
+	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+	efx_setup_rss_indir_table(efx);
+
+	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+	 */
+	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+	/* Enable SW_EV to inherit in char driver - assume harmless here */
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+	/* Squash TX of packets of 16 bytes or less */
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
new file mode 100644
index 000000000000..9351c0331a47
--- /dev/null
+++ b/drivers/net/sfc/nic.h
@@ -0,0 +1,261 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_NIC_H
+#define EFX_NIC_H
+
+#include <linux/i2c-algo-bit.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+
+/*
+ * Falcon hardware control
+ */
+
+enum {
+	EFX_REV_FALCON_A0 = 0,
+	EFX_REV_FALCON_A1 = 1,
+	EFX_REV_FALCON_B0 = 2,
+	EFX_REV_SIENA_A0 = 3,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+	return efx->type->revision;
+}
+
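+/* Revision checks elsewhere in the driver are written as range guards
+ * against this value.  A minimal sketch (the helper name is illustrative,
+ * not something the driver defines):
+ */
+static inline bool efx_nic_rev_is_b0_or_later(struct efx_nic *efx)
+{
+	return efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+}
+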
+extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+
+static inline bool efx_nic_has_mc(struct efx_nic *efx)
+{
+	return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
+}
+/* NIC has two interlinked PCI functions for the same port. */
+static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
+{
+	return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
+}
+
+enum {
+	PHY_TYPE_NONE = 0,
+	PHY_TYPE_TXC43128 = 1,
+	PHY_TYPE_88E1111 = 2,
+	PHY_TYPE_SFX7101 = 3,
+	PHY_TYPE_QT2022C2 = 4,
+	PHY_TYPE_PM8358 = 6,
+	PHY_TYPE_SFT9001A = 8,
+	PHY_TYPE_QT2025C = 9,
+	PHY_TYPE_SFT9001B = 10,
+};
+
+#define FALCON_XMAC_LOOPBACKS			\
+	((1 << LOOPBACK_XGMII) |		\
+	 (1 << LOOPBACK_XGXS) |			\
+	 (1 << LOOPBACK_XAUI))
+
+#define FALCON_GMAC_LOOPBACKS			\
+	(1 << LOOPBACK_GMAC)
+
+/**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+ * @ref_model: Model number of Solarflare reference design
+ * @gen_type: Generic board type description
+ * @init: Allocate resources and initialise peripheral hardware
+ * @init_phy: Do board-specific PHY initialisation
+ * @fini: Shut down hardware and free resources
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @monitor: Board-specific health check function
+ */
+struct falcon_board_type {
+	u8 id;
+	const char *ref_model;
+	const char *gen_type;
+	int (*init) (struct efx_nic *nic);
+	void (*init_phy) (struct efx_nic *efx);
+	void (*fini) (struct efx_nic *nic);
+	void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
+	int (*monitor) (struct efx_nic *nic);
+};
+
+/**
+ * struct falcon_board - board information
+ * @type: Type of board
+ * @major: Major rev. ('A', 'B' ...)
+ * @minor: Minor rev. (0, 1, ...)
+ * @i2c_adap: I2C adapter for on-board peripherals
+ * @i2c_data: Data for bit-banging algorithm
+ * @hwmon_client: I2C client for hardware monitor
+ * @ioexp_client: I2C client for power/port control
+ */
+struct falcon_board {
+	const struct falcon_board_type *type;
+	int major;
+	int minor;
+	struct i2c_adapter i2c_adap;
+	struct i2c_algo_bit_data i2c_data;
+	struct i2c_client *hwmon_client, *ioexp_client;
+};
+
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @pci_dev2: Secondary function of Falcon A
+ * @board: Board state and functions
+ * @stats_disable_count: Nest count for disabling statistics fetches
+ * @stats_pending: Whether a DMA of MAC statistics is pending.
+ * @stats_timer: Timer for periodically fetching MAC statistics.
+ * @stats_dma_done: Pointer to the flag which indicates DMA completion.
+ */
+struct falcon_nic_data {
+	struct pci_dev *pci_dev2;
+	struct falcon_board board;
+	unsigned int stats_disable_count;
+	bool stats_pending;
+	struct timer_list stats_timer;
+	u32 *stats_dma_done;
+};
+
+static inline struct falcon_board *falcon_board(struct efx_nic *efx)
+{
+	struct falcon_nic_data *data = efx->nic_data;
+	return &data->board;
+}
+
+/**
+ * struct siena_nic_data - Siena NIC state
+ * @fw_version: Management controller firmware version
+ * @fw_build: Firmware build number
+ * @mcdi: Management-Controller-to-Driver Interface
+ * @wol_filter_id: Wake-on-LAN packet filter id
+ */
+struct siena_nic_data {
+	u64 fw_version;
+	u32 fw_build;
+	struct efx_mcdi_iface mcdi;
+	int wol_filter_id;
+};
+
+extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+
+extern struct efx_nic_type falcon_a1_nic_type;
+extern struct efx_nic_type falcon_b0_nic_type;
+extern struct efx_nic_type siena_a0_nic_type;
+
+/**************************************************************************
+ *
+ * Externs
+ *
+ **************************************************************************
+ */
+
+extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
+/* TX data path */
+extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+
+/* RX data path */
+extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
+
+/* Event data path */
+extern int efx_nic_probe_eventq(struct efx_channel *channel);
+extern void efx_nic_init_eventq(struct efx_channel *channel);
+extern void efx_nic_fini_eventq(struct efx_channel *channel);
+extern void efx_nic_remove_eventq(struct efx_channel *channel);
+extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
+extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+
+/* MAC/PHY */
+extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
+
+/* Interrupts and test events */
+extern int efx_nic_init_interrupt(struct efx_nic *efx);
+extern void efx_nic_enable_interrupts(struct efx_nic *efx);
+extern void efx_nic_generate_test_event(struct efx_channel *channel,
+					unsigned int magic);
+extern void efx_nic_generate_interrupt(struct efx_nic *efx);
+extern void efx_nic_disable_interrupts(struct efx_nic *efx);
+extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
+extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
+extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+#define EFX_IRQ_MOD_RESOLUTION 5
+
+/* Global Resources */
+extern int efx_nic_flush_queues(struct efx_nic *efx);
+extern void falcon_start_nic_stats(struct efx_nic *efx);
+extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern int falcon_reset_xaui(struct efx_nic *efx);
+extern void efx_nic_init_common(struct efx_nic *efx);
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			 unsigned int len);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+/* Tests */
+struct efx_nic_register_test {
+	unsigned address;
+	efx_oword_t mask;
+};
+extern int efx_nic_test_registers(struct efx_nic *efx,
+				  const struct efx_nic_register_test *regs,
+				  size_t n_regs);
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
+#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
+
+/* Retrieve statistic from statistics block */
+#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
+	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
+		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
+			*((__force __le16 *)				\
+			  (efx->stats_buffer.addr +		\
+			   FALCON_STAT_OFFSET(falcon_stat))));	\
+	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
+		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
+			*((__force __le32 *)				\
+			  (efx->stats_buffer.addr +		\
+			   FALCON_STAT_OFFSET(falcon_stat))));	\
+	else							\
+		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
+			*((__force __le64 *)				\
+			  (efx->stats_buffer.addr +		\
+			   FALCON_STAT_OFFSET(falcon_stat))));	\
+	} while (0)
+
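+
+/* Usage sketch (the statistic name is illustrative): once a statistics DMA
+ * into efx->stats_buffer has completed, each hardware counter is folded
+ * into the corresponding software total, e.g.
+ *
+ *	FALCON_STAT(efx, XgRxOctets, rx_bytes);
+ *
+ * where each statistic's offset and WIDTH definitions describe its position
+ * and size within the DMAed statistics block.
+ */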
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define MAC_DATA_LBN 0
+#define MAC_DATA_WIDTH 32
+
+extern void efx_nic_generate_event(struct efx_channel *channel,
+				   efx_qword_t *event);
+
+extern void falcon_poll_xmac(struct efx_nic *efx);
+
+#endif /* EFX_NIC_H */
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index c1cff9c0c173..5bc26137257b 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
+ * Copyright 2007-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -16,16 +16,16 @@
 extern struct efx_phy_operations falcon_sfx7101_phy_ops;
 extern struct efx_phy_operations falcon_sft9001_phy_ops;
 
-extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
+extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
 
 /* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
  * to boot due to corrupt flash, or some other negative error code. */
 extern int sft9001_wait_boot(struct efx_nic *efx);
 
 /****************************************************************************
- * AMCC/Quake QT20xx PHYs
+ * AMCC/Quake QT202x PHYs
  */
-extern struct efx_phy_operations falcon_xfp_phy_ops;
+extern struct efx_phy_operations falcon_qt202x_phy_ops;
 
 /* These PHYs provide various H/W control states for LEDs */
 #define QUAKE_LED_LINK_INVAL	(0)
@@ -39,6 +39,23 @@ extern struct efx_phy_operations falcon_xfp_phy_ops;
 #define QUAKE_LED_TXLINK	(0)
 #define QUAKE_LED_RXLINK	(8)
 
-extern void xfp_set_led(struct efx_nic *p, int led, int state);
+extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+
+/****************************************************************************
+ * Siena managed PHYs
+ */
+extern struct efx_phy_operations efx_mcdi_phy_ops;
+
+extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
+			      unsigned int prtad, unsigned int devad,
+			      u16 addr, u16 *value_out, u32 *status_out);
+extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
+			       unsigned int prtad, unsigned int devad,
+			       u16 addr, u16 value, u32 *status_out);
+extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+				     struct efx_link_state *link_state,
+				     u32 speed, u32 flags, u32 fcntl);
+extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
+extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
 
 #endif
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/qt202x_phy.c
index e6b3d5eaddba..3800fc791b2f 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,14 +1,13 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation, incorporated herein by reference.
  */
 /*
- * Driver for SFP+ and XFP optical PHYs plus some support specific to the
- * AMCC QT20xx adapters; see www.amcc.com for details
+ * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
  */
 
 #include <linux/timer.h>
@@ -16,15 +15,15 @@
 #include "efx.h"
 #include "mdio_10g.h"
 #include "phy.h"
-#include "falcon.h"
+#include "nic.h"
 
-#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS |	\
-			   MDIO_DEVS_PMAPMD |	\
-			   MDIO_DEVS_PHYXS)
+#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS |		\
+			      MDIO_DEVS_PMAPMD |	\
+			      MDIO_DEVS_PHYXS)
 
-#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) |		\
-		       (1 << LOOPBACK_PMAPMD) |		\
-		       (1 << LOOPBACK_NETWORK))
+#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) |		\
+			  (1 << LOOPBACK_PMAPMD) |	\
+			  (1 << LOOPBACK_PHYXS_WS))
 
 /****************************************************************************/
 /* Quake-specific MDIO registers */
@@ -45,18 +44,18 @@
 #define PCS_VEND1_REG	   	0xc000
 #define PCS_VEND1_LBTXD_LBN	5
 
-void xfp_set_led(struct efx_nic *p, int led, int mode)
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
 {
 	int addr = MDIO_QUAKE_LED0_REG + led;
 	efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
 }
 
-struct xfp_phy_data {
+struct qt202x_phy_data {
 	enum efx_phy_mode phy_mode;
 };
 
-#define XFP_MAX_RESET_TIME 500
-#define XFP_RESET_WAIT 10
+#define QT2022C2_MAX_RESET_TIME 500
+#define QT2022C2_RESET_WAIT 10
 
 static int qt2025c_wait_reset(struct efx_nic *efx)
 {
@@ -97,7 +96,7 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
 	return 0;
 }
 
-static int xfp_reset_phy(struct efx_nic *efx)
+static int qt202x_reset_phy(struct efx_nic *efx)
 {
 	int rc;
 
@@ -111,8 +110,9 @@ static int xfp_reset_phy(struct efx_nic *efx)
 		/* Reset the PHYXS MMD. This is documented as doing
 		 * a complete soft reset. */
 		rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
-					XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
-					XFP_RESET_WAIT);
+					QT2022C2_MAX_RESET_TIME /
+					QT2022C2_RESET_WAIT,
+					QT2022C2_RESET_WAIT);
 		if (rc < 0)
 			goto fail;
 	}
@@ -122,11 +122,11 @@ static int xfp_reset_phy(struct efx_nic *efx)
 
 	/* Check that all the MMDs we expect are present and responding. We
 	 * expect faults on some if the link is down, but not on the PHY XS */
-	rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
+	rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
 	if (rc < 0)
 		goto fail;
 
-	efx->board_info.init_leds(efx);
+	falcon_board(efx)->type->init_phy(efx);
 
 	return rc;
 
@@ -135,60 +135,60 @@ static int xfp_reset_phy(struct efx_nic *efx)
 	return rc;
 }
 
-static int xfp_phy_init(struct efx_nic *efx)
+static int qt202x_phy_probe(struct efx_nic *efx)
 {
-	struct xfp_phy_data *phy_data;
-	u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
+	efx->mdio.mmds = QT202X_REQUIRED_DEVS;
+	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+	return 0;
+}
+
+static int qt202x_phy_init(struct efx_nic *efx)
+{
+	struct qt202x_phy_data *phy_data;
+	u32 devid;
 	int rc;
 
-	phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
+	rc = qt202x_reset_phy(efx);
+	if (rc) {
+		EFX_ERR(efx, "PHY init failed\n");
+		return rc;
+	}
+
+	phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
 	if (!phy_data)
 		return -ENOMEM;
 	efx->phy_data = phy_data;
 
+	devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
 	EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
 		 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
 		 efx_mdio_id_rev(devid));
 
 	phy_data->phy_mode = efx->phy_mode;
-
-	rc = xfp_reset_phy(efx);
-
-	EFX_INFO(efx, "PHY init %s.\n",
-		 rc ? "failed" : "successful");
-	if (rc < 0)
-		goto fail;
-
 	return 0;
-
- fail:
-	kfree(efx->phy_data);
-	efx->phy_data = NULL;
-	return rc;
 }
 
-static void xfp_phy_clear_interrupt(struct efx_nic *efx)
+static int qt202x_link_ok(struct efx_nic *efx)
 {
-	/* Read to clear link status alarm */
-	efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
+	return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
 }
 
-static int xfp_link_ok(struct efx_nic *efx)
+static bool qt202x_phy_poll(struct efx_nic *efx)
 {
-	return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS);
-}
+	bool was_up = efx->link_state.up;
 
-static void xfp_phy_poll(struct efx_nic *efx)
-{
-	int link_up = xfp_link_ok(efx);
-	/* Simulate a PHY event if link state has changed */
-	if (link_up != efx->link_up)
-		falcon_sim_phy_event(efx);
+	efx->link_state.up = qt202x_link_ok(efx);
+	efx->link_state.speed = 10000;
+	efx->link_state.fd = true;
+	efx->link_state.fc = efx->wanted_fc;
+
+	return efx->link_state.up != was_up;
 }
 
-static void xfp_phy_reconfigure(struct efx_nic *efx)
+static int qt202x_phy_reconfigure(struct efx_nic *efx)
 {
-	struct xfp_phy_data *phy_data = efx->phy_data;
+	struct qt202x_phy_data *phy_data = efx->phy_data;
 
 	if (efx->phy_type == PHY_TYPE_QT2025C) {
 		/* There are several different register bits which can
@@ -207,7 +207,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
 		/* Reset the PHY when moving from tx off to tx on */
 		if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
 		    (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
-			xfp_reset_phy(efx);
+			qt202x_reset_phy(efx);
 
 		efx_mdio_transmit_disable(efx);
 	}
@@ -215,36 +215,28 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
 	efx_mdio_phy_reconfigure(efx);
 
 	phy_data->phy_mode = efx->phy_mode;
-	efx->link_up = xfp_link_ok(efx);
-	efx->link_speed = 10000;
-	efx->link_fd = true;
-	efx->link_fc = efx->wanted_fc;
+
+	return 0;
 }
 
-static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 	mdio45_ethtool_gset(&efx->mdio, ecmd);
 }
 
-static void xfp_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_fini(struct efx_nic *efx)
 {
-	/* Clobber the LED if it was blinking */
-	efx->board_info.blink(efx, false);
-
 	/* Free the context block */
 	kfree(efx->phy_data);
 	efx->phy_data = NULL;
 }
 
-struct efx_phy_operations falcon_xfp_phy_ops = {
-	.macs		 = EFX_XMAC,
-	.init            = xfp_phy_init,
-	.reconfigure     = xfp_phy_reconfigure,
-	.poll            = xfp_phy_poll,
-	.fini            = xfp_phy_fini,
-	.clear_interrupt = xfp_phy_clear_interrupt,
-	.get_settings    = xfp_phy_get_settings,
+struct efx_phy_operations falcon_qt202x_phy_ops = {
+	.probe		 = qt202x_phy_probe,
+	.init		 = qt202x_phy_init,
+	.reconfigure	 = qt202x_phy_reconfigure,
+	.poll	     	 = qt202x_phy_poll,
+	.fini	  	 = qt202x_phy_fini,
+	.get_settings	 = qt202x_phy_get_settings,
 	.set_settings	 = efx_mdio_set_settings,
-	.mmds            = XFP_REQUIRED_DEVS,
-	.loopbacks       = XFP_LOOPBACKS,
 };
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
new file mode 100644
index 000000000000..89d606fe9248
--- /dev/null
+++ b/drivers/net/sfc/regs.h
@@ -0,0 +1,3168 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_REGS_H
+#define EFX_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ *     F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *             MMIO register  MC register  Host memory structure
+ * -------------------------------------------------------------
+ * Address     R              MCR
+ * Bitfield    RF             MCRF         SF
+ * Enumerator  FE             MCFE         SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ *     A: Falcon A1 (SFC4000AB)
+ *     B: Falcon B0 (SFC4000BA)
+ *     C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
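+/*
+ * For example, FRF_AZ_ADR_REGION0_LBN below names a bitfield (RF) of an
+ * MMIO register that is valid from Falcon A1 (A) onwards and has not been
+ * removed in any later revision (Z).
+ */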
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define	FR_AZ_ADR_REGION 0x00000000
+#define	FRF_AZ_ADR_REGION3_LBN 96
+#define	FRF_AZ_ADR_REGION3_WIDTH 18
+#define	FRF_AZ_ADR_REGION2_LBN 64
+#define	FRF_AZ_ADR_REGION2_WIDTH 18
+#define	FRF_AZ_ADR_REGION1_LBN 32
+#define	FRF_AZ_ADR_REGION1_WIDTH 18
+#define	FRF_AZ_ADR_REGION0_LBN 0
+#define	FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define	FR_AZ_INT_EN_KER 0x00000010
+#define	FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define	FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define	FRF_AZ_KER_INT_CHAR_LBN 4
+#define	FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define	FRF_AZ_KER_INT_KER_LBN 3
+#define	FRF_AZ_KER_INT_KER_WIDTH 1
+#define	FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define	FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define	FR_BZ_INT_EN_CHAR 0x00000020
+#define	FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define	FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define	FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define	FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_CHAR_INT_KER_LBN 3
+#define	FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define	FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define	FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define	FR_AZ_INT_ADR_KER 0x00000030
+#define	FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define	FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define	FRF_AZ_INT_ADR_KER_LBN 0
+#define	FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define	FR_BZ_INT_ADR_CHAR 0x00000040
+#define	FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define	FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define	FRF_BZ_INT_ADR_CHAR_LBN 0
+#define	FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define	FR_AA_INT_ACK_KER 0x00000050
+#define	FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define	FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define	FR_BZ_INT_ISR0 0x00000090
+#define	FRF_BZ_INT_ISR_REG_LBN 0
+#define	FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define	FR_AZ_HW_INIT 0x000000c0
+#define	FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define	FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define	FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define	FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define	FRF_CZ_TX_MRG_TAGS_LBN 120
+#define	FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define	FRF_AB_TRGT_MASK_ALL_LBN 100
+#define	FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define	FRF_AZ_DOORBELL_DROP_LBN 92
+#define	FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define	FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define	FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define	FRF_AB_PE_EIDLE_DIS_LBN 75
+#define	FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define	FRF_AA_FC_BLOCKING_EN_LBN 45
+#define	FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define	FRF_BZ_B2B_REQ_EN_LBN 45
+#define	FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define	FRF_AA_B2B_REQ_EN_LBN 44
+#define	FRF_AA_B2B_REQ_EN_WIDTH 1
+#define	FRF_BB_FC_BLOCKING_EN_LBN 44
+#define	FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define	FRF_AZ_POST_WR_MASK_LBN 40
+#define	FRF_AZ_POST_WR_MASK_WIDTH 4
+#define	FRF_AZ_TLP_TC_LBN 34
+#define	FRF_AZ_TLP_TC_WIDTH 3
+#define	FRF_AZ_TLP_ATTR_LBN 32
+#define	FRF_AZ_TLP_ATTR_WIDTH 2
+#define	FRF_AB_INTB_VEC_LBN 24
+#define	FRF_AB_INTB_VEC_WIDTH 5
+#define	FRF_AB_INTA_VEC_LBN 16
+#define	FRF_AB_INTA_VEC_WIDTH 5
+#define	FRF_AZ_WD_TIMER_LBN 8
+#define	FRF_AZ_WD_TIMER_WIDTH 8
+#define	FRF_AZ_US_DISABLE_LBN 5
+#define	FRF_AZ_US_DISABLE_WIDTH 1
+#define	FRF_AZ_TLP_EP_LBN 4
+#define	FRF_AZ_TLP_EP_WIDTH 1
+#define	FRF_AZ_ATTR_SEL_LBN 3
+#define	FRF_AZ_ATTR_SEL_WIDTH 1
+#define	FRF_AZ_TD_SEL_LBN 1
+#define	FRF_AZ_TD_SEL_WIDTH 1
+#define	FRF_AZ_TLP_TD_LBN 0
+#define	FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define	FR_AB_EE_SPI_HCMD 0x00000100
+#define	FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define	FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define	FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define	FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define	FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define	FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define	FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define	FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define	FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define	FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define	FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define	FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define	FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define	FR_CZ_USR_EV_CFG 0x00000100
+#define	FRF_CZ_USREV_DIS_LBN 16
+#define	FRF_CZ_USREV_DIS_WIDTH 1
+#define	FRF_CZ_DFLT_EVQ_LBN 0
+#define	FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define	FR_AB_EE_SPI_HADR 0x00000110
+#define	FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define	FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define	FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define	FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define	FR_AB_EE_SPI_HDATA 0x00000120
+#define	FRF_AB_EE_SPI_HDATA3_LBN 96
+#define	FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA2_LBN 64
+#define	FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA1_LBN 32
+#define	FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA0_LBN 0
+#define	FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define	FR_AB_EE_BASE_PAGE 0x00000130
+#define	FRF_AB_EE_EXPROM_MASK_LBN 16
+#define	FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define	FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define	FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define	FR_AB_EE_VPD_CFG0 0x00000140
+#define	FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define	FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define	FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define	FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define	FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define	FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define	FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define	FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define	FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define	FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define	FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define	FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define	FRF_AB_EE_VPDW_BASE_LBN 64
+#define	FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define	FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define	FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define	FRF_AB_EE_VPD_BASE_LBN 32
+#define	FRF_AB_EE_VPD_BASE_WIDTH 24
+#define	FRF_AB_EE_VPD_LENGTH_LBN 16
+#define	FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define	FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define	FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define	FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define	FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define	FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define	FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define	FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define	FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define	FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define	FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define	FRF_AB_EE_VPD_EN_LBN 0
+#define	FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define	FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define	FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define	FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define	FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define	FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define	FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define	FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define	FR_AB_EE_VPD_SW_DATA 0x00000160
+#define	FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define	FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define	FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define	FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define	FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define	FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define	FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define	FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define	FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define	FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define	FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define	FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define	FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define	FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define	FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define	FR_AB_NIC_STAT 0x00000200
+#define	FRF_BB_AER_DIS_LBN 34
+#define	FRF_BB_AER_DIS_WIDTH 1
+#define	FRF_BB_EE_STRAP_EN_LBN 31
+#define	FRF_BB_EE_STRAP_EN_WIDTH 1
+#define	FRF_BB_EE_STRAP_LBN 24
+#define	FRF_BB_EE_STRAP_WIDTH 4
+#define	FRF_BB_REVISION_ID_LBN 17
+#define	FRF_BB_REVISION_ID_WIDTH 7
+#define	FRF_AB_ONCHIP_SRAM_LBN 16
+#define	FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define	FRF_AB_SF_PRST_LBN 9
+#define	FRF_AB_SF_PRST_WIDTH 1
+#define	FRF_AB_EE_PRST_LBN 8
+#define	FRF_AB_EE_PRST_WIDTH 1
+#define	FRF_AB_ATE_MODE_LBN 3
+#define	FRF_AB_ATE_MODE_WIDTH 1
+#define	FRF_AB_STRAP_PINS_LBN 0
+#define	FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define	FR_AB_GPIO_CTL 0x00000210
+#define	FRF_AB_GPIO_OUT3_LBN 112
+#define	FRF_AB_GPIO_OUT3_WIDTH 16
+#define	FRF_AB_GPIO_IN3_LBN 104
+#define	FRF_AB_GPIO_IN3_WIDTH 8
+#define	FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define	FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define	FRF_AB_GPIO_OUT2_LBN 80
+#define	FRF_AB_GPIO_OUT2_WIDTH 16
+#define	FRF_AB_GPIO_IN2_LBN 72
+#define	FRF_AB_GPIO_IN2_WIDTH 8
+#define	FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define	FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define	FRF_AB_GPIO15_OEN_LBN 63
+#define	FRF_AB_GPIO15_OEN_WIDTH 1
+#define	FRF_AB_GPIO14_OEN_LBN 62
+#define	FRF_AB_GPIO14_OEN_WIDTH 1
+#define	FRF_AB_GPIO13_OEN_LBN 61
+#define	FRF_AB_GPIO13_OEN_WIDTH 1
+#define	FRF_AB_GPIO12_OEN_LBN 60
+#define	FRF_AB_GPIO12_OEN_WIDTH 1
+#define	FRF_AB_GPIO11_OEN_LBN 59
+#define	FRF_AB_GPIO11_OEN_WIDTH 1
+#define	FRF_AB_GPIO10_OEN_LBN 58
+#define	FRF_AB_GPIO10_OEN_WIDTH 1
+#define	FRF_AB_GPIO9_OEN_LBN 57
+#define	FRF_AB_GPIO9_OEN_WIDTH 1
+#define	FRF_AB_GPIO8_OEN_LBN 56
+#define	FRF_AB_GPIO8_OEN_WIDTH 1
+#define	FRF_AB_GPIO15_OUT_LBN 55
+#define	FRF_AB_GPIO15_OUT_WIDTH 1
+#define	FRF_AB_GPIO14_OUT_LBN 54
+#define	FRF_AB_GPIO14_OUT_WIDTH 1
+#define	FRF_AB_GPIO13_OUT_LBN 53
+#define	FRF_AB_GPIO13_OUT_WIDTH 1
+#define	FRF_AB_GPIO12_OUT_LBN 52
+#define	FRF_AB_GPIO12_OUT_WIDTH 1
+#define	FRF_AB_GPIO11_OUT_LBN 51
+#define	FRF_AB_GPIO11_OUT_WIDTH 1
+#define	FRF_AB_GPIO10_OUT_LBN 50
+#define	FRF_AB_GPIO10_OUT_WIDTH 1
+#define	FRF_AB_GPIO9_OUT_LBN 49
+#define	FRF_AB_GPIO9_OUT_WIDTH 1
+#define	FRF_AB_GPIO8_OUT_LBN 48
+#define	FRF_AB_GPIO8_OUT_WIDTH 1
+#define	FRF_AB_GPIO15_IN_LBN 47
+#define	FRF_AB_GPIO15_IN_WIDTH 1
+#define	FRF_AB_GPIO14_IN_LBN 46
+#define	FRF_AB_GPIO14_IN_WIDTH 1
+#define	FRF_AB_GPIO13_IN_LBN 45
+#define	FRF_AB_GPIO13_IN_WIDTH 1
+#define	FRF_AB_GPIO12_IN_LBN 44
+#define	FRF_AB_GPIO12_IN_WIDTH 1
+#define	FRF_AB_GPIO11_IN_LBN 43
+#define	FRF_AB_GPIO11_IN_WIDTH 1
+#define	FRF_AB_GPIO10_IN_LBN 42
+#define	FRF_AB_GPIO10_IN_WIDTH 1
+#define	FRF_AB_GPIO9_IN_LBN 41
+#define	FRF_AB_GPIO9_IN_WIDTH 1
+#define	FRF_AB_GPIO8_IN_LBN 40
+#define	FRF_AB_GPIO8_IN_WIDTH 1
+#define	FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define	FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define	FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define	FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define	FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define	FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define	FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define	FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define	FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_CLK156_OUT_EN_LBN 31
+#define	FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define	FRF_AB_USE_NIC_CLK_LBN 30
+#define	FRF_AB_USE_NIC_CLK_WIDTH 1
+#define	FRF_AB_GPIO5_OEN_LBN 29
+#define	FRF_AB_GPIO5_OEN_WIDTH 1
+#define	FRF_AB_GPIO4_OEN_LBN 28
+#define	FRF_AB_GPIO4_OEN_WIDTH 1
+#define	FRF_AB_GPIO3_OEN_LBN 27
+#define	FRF_AB_GPIO3_OEN_WIDTH 1
+#define	FRF_AB_GPIO2_OEN_LBN 26
+#define	FRF_AB_GPIO2_OEN_WIDTH 1
+#define	FRF_AB_GPIO1_OEN_LBN 25
+#define	FRF_AB_GPIO1_OEN_WIDTH 1
+#define	FRF_AB_GPIO0_OEN_LBN 24
+#define	FRF_AB_GPIO0_OEN_WIDTH 1
+#define	FRF_AB_GPIO7_OUT_LBN 23
+#define	FRF_AB_GPIO7_OUT_WIDTH 1
+#define	FRF_AB_GPIO6_OUT_LBN 22
+#define	FRF_AB_GPIO6_OUT_WIDTH 1
+#define	FRF_AB_GPIO5_OUT_LBN 21
+#define	FRF_AB_GPIO5_OUT_WIDTH 1
+#define	FRF_AB_GPIO4_OUT_LBN 20
+#define	FRF_AB_GPIO4_OUT_WIDTH 1
+#define	FRF_AB_GPIO3_OUT_LBN 19
+#define	FRF_AB_GPIO3_OUT_WIDTH 1
+#define	FRF_AB_GPIO2_OUT_LBN 18
+#define	FRF_AB_GPIO2_OUT_WIDTH 1
+#define	FRF_AB_GPIO1_OUT_LBN 17
+#define	FRF_AB_GPIO1_OUT_WIDTH 1
+#define	FRF_AB_GPIO0_OUT_LBN 16
+#define	FRF_AB_GPIO0_OUT_WIDTH 1
+#define	FRF_AB_GPIO7_IN_LBN 15
+#define	FRF_AB_GPIO7_IN_WIDTH 1
+#define	FRF_AB_GPIO6_IN_LBN 14
+#define	FRF_AB_GPIO6_IN_WIDTH 1
+#define	FRF_AB_GPIO5_IN_LBN 13
+#define	FRF_AB_GPIO5_IN_WIDTH 1
+#define	FRF_AB_GPIO4_IN_LBN 12
+#define	FRF_AB_GPIO4_IN_WIDTH 1
+#define	FRF_AB_GPIO3_IN_LBN 11
+#define	FRF_AB_GPIO3_IN_WIDTH 1
+#define	FRF_AB_GPIO2_IN_LBN 10
+#define	FRF_AB_GPIO2_IN_WIDTH 1
+#define	FRF_AB_GPIO1_IN_LBN 9
+#define	FRF_AB_GPIO1_IN_WIDTH 1
+#define	FRF_AB_GPIO0_IN_LBN 8
+#define	FRF_AB_GPIO0_IN_WIDTH 1
+#define	FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define	FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define	FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define	FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define	FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define	FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define	FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define	FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define	FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define	FR_AB_GLB_CTL 0x00000220
+#define	FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define	FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define	FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define	FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define	FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define	FRF_AA_PCIX_RST_CTL_LBN 60
+#define	FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define	FRF_BB_BIU_RST_CTL_LBN 60
+#define	FRF_BB_BIU_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define	FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define	FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define	FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define	FRF_AB_XGRX_RST_CTL_LBN 56
+#define	FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define	FRF_AB_XGTX_RST_CTL_LBN 55
+#define	FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define	FRF_AB_EM_RST_CTL_LBN 54
+#define	FRF_AB_EM_RST_CTL_WIDTH 1
+#define	FRF_AB_EV_RST_CTL_LBN 53
+#define	FRF_AB_EV_RST_CTL_WIDTH 1
+#define	FRF_AB_SR_RST_CTL_LBN 52
+#define	FRF_AB_SR_RST_CTL_WIDTH 1
+#define	FRF_AB_RX_RST_CTL_LBN 51
+#define	FRF_AB_RX_RST_CTL_WIDTH 1
+#define	FRF_AB_TX_RST_CTL_LBN 50
+#define	FRF_AB_TX_RST_CTL_WIDTH 1
+#define	FRF_AB_EE_RST_CTL_LBN 49
+#define	FRF_AB_EE_RST_CTL_WIDTH 1
+#define	FRF_AB_CS_RST_CTL_LBN 48
+#define	FRF_AB_CS_RST_CTL_WIDTH 1
+#define	FRF_AB_HOT_RST_CTL_LBN 40
+#define	FRF_AB_HOT_RST_CTL_WIDTH 2
+#define	FRF_AB_RST_EXT_PHY_LBN 31
+#define	FRF_AB_RST_EXT_PHY_WIDTH 1
+#define	FRF_AB_RST_XAUI_SD_LBN 30
+#define	FRF_AB_RST_XAUI_SD_WIDTH 1
+#define	FRF_AB_RST_PCIE_SD_LBN 29
+#define	FRF_AB_RST_PCIE_SD_WIDTH 1
+#define	FRF_AA_RST_PCIX_LBN 28
+#define	FRF_AA_RST_PCIX_WIDTH 1
+#define	FRF_BB_RST_BIU_LBN 28
+#define	FRF_BB_RST_BIU_WIDTH 1
+#define	FRF_AB_RST_PCIE_STKY_LBN 27
+#define	FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define	FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define	FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define	FRF_AB_RST_PCIE_CORE_LBN 25
+#define	FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define	FRF_AB_RST_XGRX_LBN 24
+#define	FRF_AB_RST_XGRX_WIDTH 1
+#define	FRF_AB_RST_XGTX_LBN 23
+#define	FRF_AB_RST_XGTX_WIDTH 1
+#define	FRF_AB_RST_EM_LBN 22
+#define	FRF_AB_RST_EM_WIDTH 1
+#define	FRF_AB_RST_EV_LBN 21
+#define	FRF_AB_RST_EV_WIDTH 1
+#define	FRF_AB_RST_SR_LBN 20
+#define	FRF_AB_RST_SR_WIDTH 1
+#define	FRF_AB_RST_RX_LBN 19
+#define	FRF_AB_RST_RX_WIDTH 1
+#define	FRF_AB_RST_TX_LBN 18
+#define	FRF_AB_RST_TX_WIDTH 1
+#define	FRF_AB_RST_SF_LBN 17
+#define	FRF_AB_RST_SF_WIDTH 1
+#define	FRF_AB_RST_CS_LBN 16
+#define	FRF_AB_RST_CS_WIDTH 1
+#define	FRF_AB_INT_RST_DUR_LBN 4
+#define	FRF_AB_INT_RST_DUR_WIDTH 3
+#define	FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define	FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define	FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define	FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define	FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define	FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define	FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define	FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define	FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define	FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define	FRF_AB_SWRST_LBN 0
+#define	FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define	FR_AZ_FATAL_INTR_KER 0x00000230
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define	FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define	FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define	FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define	FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define	FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define	FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define	FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define	FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define	FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define	FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define	FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define	FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define	FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define	FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define	FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define	FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define	FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define	FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define	FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define	FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define	FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define	FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define	FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define	FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define	FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define	FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define	FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define	FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define	FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define	FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define	FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define	FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define	FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define	FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define	FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define	FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define	FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define	FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define	FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define	FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define	FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define	FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define	FR_BZ_DP_CTRL 0x00000250
+#define	FRF_BZ_FLS_EVQ_ID_LBN 0
+#define	FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define	FR_AZ_MEM_STAT 0x00000260
+#define	FRF_AB_MEM_PERR_VEC_LBN 53
+#define	FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define	FRF_AB_MBIST_CORR_LBN 38
+#define	FRF_AB_MBIST_CORR_WIDTH 15
+#define	FRF_AB_MBIST_ERR_LBN 0
+#define	FRF_AB_MBIST_ERR_WIDTH 40
+#define	FRF_CZ_MEM_PERR_VEC_LBN 0
+#define	FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define	FR_AZ_CS_DEBUG 0x00000270
+#define	FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define	FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define	FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define	FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define	FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define	FRF_CZ_CS_PORT_NUM_LBN 40
+#define	FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define	FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define	FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define	FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define	FRF_CZ_CS_PORT_FPE_LBN 1
+#define	FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define	FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define	FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define	FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define	FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define	FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define	FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define	FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define	FRF_AZ_CS_DEBUG_EN_LBN 0
+#define	FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define	FR_AZ_DRIVER 0x00000280
+#define	FR_AZ_DRIVER_STEP 16
+#define	FR_AZ_DRIVER_ROWS 8
+#define	FRF_AZ_DRIVER_DW0_LBN 0
+#define	FRF_AZ_DRIVER_DW0_WIDTH 32
+
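Registers that are repeated per row (per queue, per port, and so on) carry companion _STEP and _ROWS definitions, first seen for the driver scratch registers above and used for many of the queue registers below. A sketch of how a row's offset could be derived from them, assuming plain base + index * step addressing (example_reg_row_addr is a hypothetical helper, not a driver function):

/* Hypothetical helper: byte offset of row 'index' of a repeated register,
 * assuming base + index * step addressing with 0 <= index < rows. */
static inline unsigned int example_reg_row_addr(unsigned int base,
						unsigned int step,
						unsigned int index)
{
	return base + index * step;
}

/* e.g. the third driver scratch register:
 *	addr = example_reg_row_addr(FR_AZ_DRIVER, FR_AZ_DRIVER_STEP, 2);
 */
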
+/* ALTERA_BUILD_REG: Altera build register */
+#define	FR_AZ_ALTERA_BUILD 0x00000300
+#define	FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define	FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define	FR_AZ_CSR_SPARE 0x00000310
+#define	FRF_AB_MEM_PERR_EN_LBN 64
+#define	FRF_AB_MEM_PERR_EN_WIDTH 38
+#define	FRF_CZ_MEM_PERR_EN_LBN 64
+#define	FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define	FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define	FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define	FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define	FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define	FR_AB_PCIE_SD_CTL0123 0x00000320
+#define	FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define	FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define	FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define	FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define	FRF_AB_PCIE_OFFSET_LBN 56
+#define	FRF_AB_PCIE_OFFSET_WIDTH 8
+#define	FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define	FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define	FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define	FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define	FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define	FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define	FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define	FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define	FRF_AB_PCIE_PARRESET_H_LBN 51
+#define	FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define	FRF_AB_PCIE_PARRESET_L_LBN 50
+#define	FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define	FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define	FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define	FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define	FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define	FRF_AB_PCIE_LPBK_LBN 40
+#define	FRF_AB_PCIE_LPBK_WIDTH 8
+#define	FRF_AB_PCIE_PARLPBK_LBN 32
+#define	FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define	FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define	FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define	FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define	FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define	FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define	FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define	FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define	FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define	FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define	FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define	FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define	FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define	FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define	FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define	FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define	FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define	FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define	FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define	FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define	FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define	FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define	FFE_AB_PCIE_RXEQCTL_OFF 2
+#define	FFE_AB_PCIE_RXEQCTL_MIN 1
+#define	FFE_AB_PCIE_RXEQCTL_MAX 0
+#define	FRF_AB_PCIE_HIDRV_LBN 8
+#define	FRF_AB_PCIE_HIDRV_WIDTH 8
+#define	FRF_AB_PCIE_LODRV_LBN 0
+#define	FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define	FR_AB_PCIE_SD_CTL45 0x00000330
+#define	FRF_AB_PCIE_DTX7_LBN 60
+#define	FRF_AB_PCIE_DTX7_WIDTH 4
+#define	FRF_AB_PCIE_DTX6_LBN 56
+#define	FRF_AB_PCIE_DTX6_WIDTH 4
+#define	FRF_AB_PCIE_DTX5_LBN 52
+#define	FRF_AB_PCIE_DTX5_WIDTH 4
+#define	FRF_AB_PCIE_DTX4_LBN 48
+#define	FRF_AB_PCIE_DTX4_WIDTH 4
+#define	FRF_AB_PCIE_DTX3_LBN 44
+#define	FRF_AB_PCIE_DTX3_WIDTH 4
+#define	FRF_AB_PCIE_DTX2_LBN 40
+#define	FRF_AB_PCIE_DTX2_WIDTH 4
+#define	FRF_AB_PCIE_DTX1_LBN 36
+#define	FRF_AB_PCIE_DTX1_WIDTH 4
+#define	FRF_AB_PCIE_DTX0_LBN 32
+#define	FRF_AB_PCIE_DTX0_WIDTH 4
+#define	FRF_AB_PCIE_DEQ7_LBN 28
+#define	FRF_AB_PCIE_DEQ7_WIDTH 4
+#define	FRF_AB_PCIE_DEQ6_LBN 24
+#define	FRF_AB_PCIE_DEQ6_WIDTH 4
+#define	FRF_AB_PCIE_DEQ5_LBN 20
+#define	FRF_AB_PCIE_DEQ5_WIDTH 4
+#define	FRF_AB_PCIE_DEQ4_LBN 16
+#define	FRF_AB_PCIE_DEQ4_WIDTH 4
+#define	FRF_AB_PCIE_DEQ3_LBN 12
+#define	FRF_AB_PCIE_DEQ3_WIDTH 4
+#define	FRF_AB_PCIE_DEQ2_LBN 8
+#define	FRF_AB_PCIE_DEQ2_WIDTH 4
+#define	FRF_AB_PCIE_DEQ1_LBN 4
+#define	FRF_AB_PCIE_DEQ1_WIDTH 4
+#define	FRF_AB_PCIE_DEQ0_LBN 0
+#define	FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define	FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define	FRF_AB_PCIE_PRBSERR_LBN 40
+#define	FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define	FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define	FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define	FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define	FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define	FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define	FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define	FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define	FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define	FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define	FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define	FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define	FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define	FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define	FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define	FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSEL_LBN 0
+#define	FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define	FR_BB_DEBUG_DATA_OUT 0x00000350
+#define	FRF_BB_DEBUG2_PORT_LBN 25
+#define	FRF_BB_DEBUG2_PORT_WIDTH 15
+#define	FRF_BB_DEBUG1_PORT_LBN 0
+#define	FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define	FR_BZ_EVQ_RPTR_P0 0x00000400
+#define	FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define	FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define	FR_AA_EVQ_RPTR_KER 0x00011b00
+#define	FR_AA_EVQ_RPTR_KER_STEP 4
+#define	FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define	FR_BZ_EVQ_RPTR 0x00fa0000
+#define	FR_BZ_EVQ_RPTR_STEP 16
+#define	FR_BB_EVQ_RPTR_ROWS 4096
+#define	FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define	FR_BB_EVQ_RPTR_P123 0x01000400
+#define	FR_BB_EVQ_RPTR_P123_STEP 8192
+#define	FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define	FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define	FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define	FRF_AZ_EVQ_RPTR_LBN 0
+#define	FRF_AZ_EVQ_RPTR_WIDTH 15
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define	FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define	FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define	FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define	FR_AA_TIMER_COMMAND_KER 0x00000420
+#define	FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define	FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define	FR_BB_TIMER_COMMAND_P123 0x01000420
+#define	FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define	FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define	FRF_CZ_TC_TIMER_MODE_LBN 14
+#define	FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define	FRF_AB_TC_TIMER_MODE_LBN 12
+#define	FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define	FRF_CZ_TC_TIMER_VAL_LBN 0
+#define	FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define	FRF_AB_TC_TIMER_VAL_LBN 0
+#define	FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+/* DRV_EV_REG: Driver generated event register */
+#define	FR_AZ_DRV_EV 0x00000440
+#define	FRF_AZ_DRV_EV_QID_LBN 64
+#define	FRF_AZ_DRV_EV_QID_WIDTH 12
+#define	FRF_AZ_DRV_EV_DATA_LBN 0
+#define	FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define	FR_AZ_EVQ_CTL 0x00000450
+#define	FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define	FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define	FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define	FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define	FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define	FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define	FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define	FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define	FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define	FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define	FR_AZ_EVQ_CNT1 0x00000460
+#define	FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define	FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define	FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define	FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define	FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define	FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define	FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define	FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define	FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define	FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define	FR_AZ_EVQ_CNT2 0x00000470
+#define	FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define	FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define	FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define	FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define	FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define	FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define	FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define	FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define	FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define	FR_CZ_USR_EV 0x00000540
+#define	FR_CZ_USR_EV_STEP 8192
+#define	FR_CZ_USR_EV_ROWS 1024
+#define	FRF_CZ_USR_EV_DATA_LBN 0
+#define	FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define	FR_AZ_BUF_TBL_CFG 0x00000600
+#define	FRF_AZ_BUF_TBL_MODE_LBN 3
+#define	FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define	FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define	FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define	FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define	FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define	FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define	FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define	FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define	FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define	FR_AZ_SRM_CFG 0x00000630
+#define	FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define	FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define	FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define	FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define	FRF_AZ_SRM_INIT_EN_LBN 3
+#define	FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define	FRF_AZ_SRM_NUM_BANK_LBN 2
+#define	FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define	FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define	FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define	FR_AZ_BUF_TBL_UPD 0x00000650
+#define	FRF_AZ_BUF_UPD_CMD_LBN 63
+#define	FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define	FRF_AZ_BUF_CLR_CMD_LBN 62
+#define	FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define	FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define	FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define	FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define	FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: Buffer table update register */
+#define	FR_AZ_SRM_UPD_EVQ 0x00000660
+#define	FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define	FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define	FR_AZ_SRAM_PARITY 0x00000670
+#define	FRF_CZ_BYPASS_ECC_LBN 3
+#define	FRF_CZ_BYPASS_ECC_WIDTH 1
+#define	FRF_CZ_SEC_INT_LBN 2
+#define	FRF_CZ_SEC_INT_WIDTH 1
+#define	FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define	FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define	FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define	FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define	FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define	FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define	FR_AZ_RX_CFG 0x00000800
+#define	FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define	FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define	FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define	FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define	FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define	FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define	FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define	FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define	FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define	FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define	FRF_BZ_RX_TCP_SUP_LBN 48
+#define	FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define	FRF_BZ_RX_INGR_EN_LBN 47
+#define	FRF_BZ_RX_INGR_EN_WIDTH 1
+#define	FRF_BZ_RX_IP_HASH_LBN 46
+#define	FRF_BZ_RX_IP_HASH_WIDTH 1
+#define	FRF_BZ_RX_HASH_ALG_LBN 45
+#define	FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define	FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define	FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define	FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define	FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define	FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define	FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define	FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define	FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define	FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define	FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define	FRF_BZ_RX_XON_TX_TH_LBN 33
+#define	FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define	FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define	FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define	FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define	FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define	FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define	FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define	FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define	FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define	FRF_AA_RX_OWNERR_CTL_LBN 30
+#define	FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define	FRF_AA_RX_XON_TX_TH_LBN 25
+#define	FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define	FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define	FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define	FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define	FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define	FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define	FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define	FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define	FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define	FRF_AA_RX_XON_MAC_TH_LBN 6
+#define	FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define	FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define	FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define	FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define	FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define	FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define	FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define	FR_BZ_RX_FILTER_CTL 0x00000810
+#define	FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define	FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define	FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define	FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define	FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define	FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define	FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define	FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define	FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define	FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define	FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define	FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define	FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define	FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define	FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define	FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define	FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define	FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define	FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define	FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define	FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define	FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define	FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define	FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_NUM_KER_LBN 24
+#define	FRF_BZ_NUM_KER_WIDTH 2
+#define	FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define	FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define	FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define	FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define	FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define	FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define	FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define	FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define	FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define	FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define	FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define	FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define	FR_AA_RX_DESC_UPD_KER 0x00000830
+#define	FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define	FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define	FR_BB_RX_DESC_UPD_P123 0x01000830
+#define	FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define	FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define	FRF_AZ_RX_DESC_WPTR_LBN 96
+#define	FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define	FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define	FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define	FRF_AZ_RX_DESC_LBN 0
+#define	FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define	FR_AZ_RX_DC_CFG 0x00000840
+#define	FRF_AB_RX_MAX_PF_LBN 2
+#define	FRF_AB_RX_MAX_PF_WIDTH 2
+#define	FRF_AZ_RX_DC_SIZE_LBN 0
+#define	FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define	FFE_AZ_RX_DC_SIZE_64 3
+#define	FFE_AZ_RX_DC_SIZE_32 2
+#define	FFE_AZ_RX_DC_SIZE_16 1
+#define	FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define	FR_AZ_RX_DC_PF_WM 0x00000850
+#define	FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define	FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define	FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define	FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define	FR_BZ_RX_RSS_TKEY 0x00000860
+#define	FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define	FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define	FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define	FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define	FR_AZ_RX_NODESC_DROP 0x00000880
+#define	FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define	FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define	FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define	FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define	FR_AA_RX_SELF_RST 0x00000890
+#define	FRF_AA_RX_ISCSI_DIS_LBN 17
+#define	FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define	FRF_AA_RX_SW_RST_REG_LBN 16
+#define	FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define	FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define	FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define	FRF_AA_RX_SELF_RST_EN_LBN 8
+#define	FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define	FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define	FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define	FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define	FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define	FR_AZ_RX_DEBUG 0x000008a0
+#define	FRF_AZ_RX_DEBUG_LBN 0
+#define	FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define	FR_AZ_RX_PUSH_DROP 0x000008b0
+#define	FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define	FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define	FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define	FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define	FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define	FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define	FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define	FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define	FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define	FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define	FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define	FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define	FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define	FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define	FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define	FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define	FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define	FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define	FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define	FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define	FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define	FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define	FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define	FRF_AZ_TX_DESC_WPTR_LBN 96
+#define	FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define	FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define	FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define	FRF_AZ_TX_DESC_LBN 0
+#define	FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define	FR_AZ_TX_DC_CFG 0x00000a20
+#define	FRF_AZ_TX_DC_SIZE_LBN 0
+#define	FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define	FFE_AZ_TX_DC_SIZE_32 2
+#define	FFE_AZ_TX_DC_SIZE_16 1
+#define	FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define	FR_AA_TX_CHKSM_CFG 0x00000a30
+#define	FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define	FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define	FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define	FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define	FR_AZ_TX_CFG 0x00000a50
+#define	FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define	FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define	FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define	FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define	FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define	FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define	FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define	FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define	FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define	FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define	FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define	FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define	FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define	FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define	FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define	FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define	FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define	FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define	FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define	FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define	FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define	FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define	FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define	FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define	FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define	FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define	FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define	FR_AZ_TX_PUSH_DROP 0x00000a60
+#define	FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define	FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Transmit configuration register */
+#define	FR_AZ_TX_RESERVED 0x00000a80
+#define	FRF_AZ_TX_EVT_CNT_LBN 121
+#define	FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define	FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define	FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define	FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define	FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define	FRF_AZ_TX_PUSH_EN_LBN 89
+#define	FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define	FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define	FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define	FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define	FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define	FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define	FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define	FRF_AZ_TX_DMAQ_ST_LBN 78
+#define	FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define	FRF_AZ_TX_RX_SPACER_LBN 64
+#define	FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define	FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define	FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define	FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define	FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define	FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define	FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define	FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define	FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define	FRF_AZ_TX_XP_TIMER_LBN 52
+#define	FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define	FRF_AZ_TX_PREF_SPACER_LBN 44
+#define	FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define	FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define	FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define	FRF_AZ_TX_ONLY1TAG_LBN 21
+#define	FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define	FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define	FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define	FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define	FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define	FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define	FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define	FRF_AA_TX_DMA_FF_THR_LBN 16
+#define	FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define	FRF_AZ_TX_DMA_SPACER_LBN 8
+#define	FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define	FRF_AA_TX_TCP_DIS_LBN 7
+#define	FRF_AA_TX_TCP_DIS_WIDTH 1
+#define	FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define	FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define	FRF_AA_TX_IP_DIS_LBN 6
+#define	FRF_AA_TX_IP_DIS_WIDTH 1
+#define	FRF_AZ_TX_MAX_CPL_LBN 2
+#define	FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define	FFE_AZ_TX_MAX_CPL_16 3
+#define	FFE_AZ_TX_MAX_CPL_8 2
+#define	FFE_AZ_TX_MAX_CPL_4 1
+#define	FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define	FRF_AZ_TX_MAX_PREF_LBN 0
+#define	FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define	FFE_AZ_TX_MAX_PREF_32 3
+#define	FFE_AZ_TX_MAX_PREF_16 2
+#define	FFE_AZ_TX_MAX_PREF_8 1
+#define	FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define	FR_BZ_TX_PACE 0x00000a90
+#define	FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define	FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define	FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define	FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define	FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define	FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define	FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define	FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define	FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define	FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define	FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define	FR_BB_TX_VLAN 0x00000ae0
+#define	FRF_BB_TX_VLAN_EN_LBN 127
+#define	FRF_BB_TX_VLAN_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define	FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define	FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_LBN 112
+#define	FRF_BB_TX_VLAN7_WIDTH 12
+#define	FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define	FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define	FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN6_LBN 96
+#define	FRF_BB_TX_VLAN6_WIDTH 12
+#define	FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define	FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define	FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN5_LBN 80
+#define	FRF_BB_TX_VLAN5_WIDTH 12
+#define	FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define	FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define	FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN4_LBN 64
+#define	FRF_BB_TX_VLAN4_WIDTH 12
+#define	FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define	FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define	FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN3_LBN 48
+#define	FRF_BB_TX_VLAN3_WIDTH 12
+#define	FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define	FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define	FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN2_LBN 32
+#define	FRF_BB_TX_VLAN2_WIDTH 12
+#define	FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define	FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define	FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN1_LBN 16
+#define	FRF_BB_TX_VLAN1_WIDTH 12
+#define	FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define	FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define	FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN0_LBN 0
+#define	FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define	FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define	FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define	FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define	FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define	FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define	FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define	FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define	FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define	FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define	FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define	FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define	FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define	FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define	FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define	FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define	FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define	FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define	FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define	FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define	FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define	FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define	FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define	FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define	FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define	FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define	FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define	FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define	FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define	FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define	FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define	FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define	FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define	FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define	FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define	FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define	FR_BB_TX_IPFIL_TBL 0x00000b00
+#define	FR_BB_TX_IPFIL_TBL_STEP 16
+#define	FR_BB_TX_IPFIL_TBL_ROWS 16
+#define	FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define	FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define	FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define	FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define	FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define	FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define	FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define	FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define	FR_AB_MD_TXD 0x00000c00
+#define	FRF_AB_MD_TXD_LBN 0
+#define	FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define	FR_AB_MD_RXD 0x00000c10
+#define	FRF_AB_MD_RXD_LBN 0
+#define	FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define	FR_AB_MD_CS 0x00000c20
+#define	FRF_AB_MD_RD_EN_CMD_LBN 15
+#define	FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define	FRF_AB_MD_WR_EN_CMD_LBN 14
+#define	FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define	FRF_AB_MD_ADDR_CMD_LBN 13
+#define	FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define	FRF_AB_MD_PT_LBN 7
+#define	FRF_AB_MD_PT_WIDTH 3
+#define	FRF_AB_MD_PL_LBN 6
+#define	FRF_AB_MD_PL_WIDTH 1
+#define	FRF_AB_MD_INT_CLR_LBN 5
+#define	FRF_AB_MD_INT_CLR_WIDTH 1
+#define	FRF_AB_MD_GC_LBN 4
+#define	FRF_AB_MD_GC_WIDTH 1
+#define	FRF_AB_MD_PRSP_LBN 3
+#define	FRF_AB_MD_PRSP_WIDTH 1
+#define	FRF_AB_MD_RIC_LBN 2
+#define	FRF_AB_MD_RIC_WIDTH 1
+#define	FRF_AB_MD_RDC_LBN 1
+#define	FRF_AB_MD_RDC_WIDTH 1
+#define	FRF_AB_MD_WRC_LBN 0
+#define	FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define	FR_AB_MD_PHY_ADR 0x00000c30
+#define	FRF_AB_MD_PHY_ADR_LBN 0
+#define	FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define	FR_AB_MD_ID 0x00000c40
+#define	FRF_AB_MD_PRT_ADR_LBN 11
+#define	FRF_AB_MD_PRT_ADR_WIDTH 5
+#define	FRF_AB_MD_DEV_ADR_LBN 6
+#define	FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define	FR_AB_MD_STAT 0x00000c50
+#define	FRF_AB_MD_PINT_LBN 4
+#define	FRF_AB_MD_PINT_WIDTH 1
+#define	FRF_AB_MD_DONE_LBN 3
+#define	FRF_AB_MD_DONE_WIDTH 1
+#define	FRF_AB_MD_BSERR_LBN 2
+#define	FRF_AB_MD_BSERR_WIDTH 1
+#define	FRF_AB_MD_LNFL_LBN 1
+#define	FRF_AB_MD_LNFL_WIDTH 1
+#define	FRF_AB_MD_BSY_LBN 0
+#define	FRF_AB_MD_BSY_WIDTH 1
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define	FR_AB_MAC_STAT_DMA 0x00000c60
+#define	FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define	FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define	FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define	FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define	FR_AB_MAC_CTRL 0x00000c80
+#define	FRF_AB_MAC_XOFF_VAL_LBN 16
+#define	FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define	FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define	FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define	FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define	FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define	FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define	FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define	FRF_AB_MAC_UC_PROM_LBN 3
+#define	FRF_AB_MAC_UC_PROM_WIDTH 1
+#define	FRF_AB_MAC_LINK_STATUS_LBN 2
+#define	FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define	FRF_AB_MAC_SPEED_LBN 0
+#define	FRF_AB_MAC_SPEED_WIDTH 2
+#define	FFE_AB_MAC_SPEED_10G 3
+#define	FFE_AB_MAC_SPEED_1G 2
+#define	FFE_AB_MAC_SPEED_100M 1
+#define	FFE_AB_MAC_SPEED_10M 0
+
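As an aside on how these definitions are consumed: each field is described by a lowest bit number (LBN) and a WIDTH, with FFE_* constants giving the legal field values. Below is a minimal stand-alone sketch of setting the MAC speed field with plain shift-and-mask arithmetic; the real driver goes through its efx_oword_t field helpers, which are not part of this header, and field_insert plus the zeroed register image here are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define FRF_AB_MAC_SPEED_LBN 0
#define FRF_AB_MAC_SPEED_WIDTH 2
#define FFE_AB_MAC_SPEED_10G 3

/* Plain shift-and-mask stand-in for the driver's oword field helpers. */
static uint32_t field_insert(uint32_t reg, unsigned int lbn,
			     unsigned int width, uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << lbn;

	return (reg & ~mask) | ((val << lbn) & mask);
}

int main(void)
{
	uint32_t mac_ctrl = 0;	/* made-up MAC_CTRL image, not a real readback */

	mac_ctrl = field_insert(mac_ctrl, FRF_AB_MAC_SPEED_LBN,
				FRF_AB_MAC_SPEED_WIDTH, FFE_AB_MAC_SPEED_10G);
	printf("MAC_CTRL image = %#x\n", mac_ctrl);
	return 0;
}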
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define	FR_BB_GEN_MODE 0x00000c90
+#define	FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define	FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define	FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define	FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define	FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define	FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define	FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define	FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define	FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define	FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define	FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define	FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define	FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define	FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define	FR_AB_GM_CFG1 0x00000e00
+#define	FRF_AB_GM_SW_RST_LBN 31
+#define	FRF_AB_GM_SW_RST_WIDTH 1
+#define	FRF_AB_GM_SIM_RST_LBN 30
+#define	FRF_AB_GM_SIM_RST_WIDTH 1
+#define	FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define	FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define	FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define	FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define	FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define	FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define	FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define	FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define	FRF_AB_GM_LOOP_LBN 8
+#define	FRF_AB_GM_LOOP_WIDTH 1
+#define	FRF_AB_GM_RX_FC_EN_LBN 5
+#define	FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define	FRF_AB_GM_TX_FC_EN_LBN 4
+#define	FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define	FRF_AB_GM_SYNC_RXEN_LBN 3
+#define	FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define	FRF_AB_GM_RX_EN_LBN 2
+#define	FRF_AB_GM_RX_EN_WIDTH 1
+#define	FRF_AB_GM_SYNC_TXEN_LBN 1
+#define	FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define	FRF_AB_GM_TX_EN_LBN 0
+#define	FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define	FR_AB_GM_CFG2 0x00000e10
+#define	FRF_AB_GM_PAMBL_LEN_LBN 12
+#define	FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define	FRF_AB_GM_IF_MODE_LBN 8
+#define	FRF_AB_GM_IF_MODE_WIDTH 2
+#define	FFE_AB_IF_MODE_BYTE_MODE 2
+#define	FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define	FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define	FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define	FRF_AB_GM_LEN_CHK_LBN 4
+#define	FRF_AB_GM_LEN_CHK_WIDTH 1
+#define	FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define	FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define	FRF_AB_GM_CRC_EN_LBN 1
+#define	FRF_AB_GM_CRC_EN_WIDTH 1
+#define	FRF_AB_GM_FD_LBN 0
+#define	FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define	FR_AB_GM_IPG 0x00000e20
+#define	FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define	FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define	FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define	FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define	FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define	FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define	FRF_AB_GM_B2B_IPG_LBN 0
+#define	FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define	FR_AB_GM_HD 0x00000e30
+#define	FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define	FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define	FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define	FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define	FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define	FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define	FRF_AB_GM_DIS_BOFF_LBN 17
+#define	FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define	FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define	FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define	FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define	FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define	FRF_AB_GM_COL_WIN_LBN 0
+#define	FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define	FR_AB_GM_MAX_FLEN 0x00000e40
+#define	FRF_AB_GM_MAX_FLEN_LBN 0
+#define	FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define	FR_AB_GM_TEST 0x00000e70
+#define	FRF_AB_GM_MAX_BOFF_LBN 3
+#define	FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define	FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define	FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define	FRF_AB_GM_TEST_PAUSE_LBN 1
+#define	FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define	FRF_AB_GM_SHORT_SLOT_LBN 0
+#define	FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define	FR_AB_GM_ADR1 0x00000f00
+#define	FRF_AB_GM_ADR_B0_LBN 24
+#define	FRF_AB_GM_ADR_B0_WIDTH 8
+#define	FRF_AB_GM_ADR_B1_LBN 16
+#define	FRF_AB_GM_ADR_B1_WIDTH 8
+#define	FRF_AB_GM_ADR_B2_LBN 8
+#define	FRF_AB_GM_ADR_B2_WIDTH 8
+#define	FRF_AB_GM_ADR_B3_LBN 0
+#define	FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define	FR_AB_GM_ADR2 0x00000f10
+#define	FRF_AB_GM_ADR_B4_LBN 24
+#define	FRF_AB_GM_ADR_B4_WIDTH 8
+#define	FRF_AB_GM_ADR_B5_LBN 16
+#define	FRF_AB_GM_ADR_B5_WIDTH 8
+
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define	FR_AB_GMF_CFG0 0x00000f20
+#define	FRF_AB_GMF_FTFENRPLY_LBN 20
+#define	FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_STFENRPLY_LBN 19
+#define	FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_FRFENRPLY_LBN 18
+#define	FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_SRFENRPLY_LBN 17
+#define	FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_WTMENRPLY_LBN 16
+#define	FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define	FRF_AB_GMF_FTFENREQ_LBN 12
+#define	FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define	FRF_AB_GMF_STFENREQ_LBN 11
+#define	FRF_AB_GMF_STFENREQ_WIDTH 1
+#define	FRF_AB_GMF_FRFENREQ_LBN 10
+#define	FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define	FRF_AB_GMF_SRFENREQ_LBN 9
+#define	FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define	FRF_AB_GMF_WTMENREQ_LBN 8
+#define	FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTFT_LBN 4
+#define	FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTST_LBN 3
+#define	FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTFR_LBN 2
+#define	FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTSR_LBN 1
+#define	FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTWT_LBN 0
+#define	FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define	FR_AB_GMF_CFG1 0x00000f30
+#define	FRF_AB_GMF_CFGFRTH_LBN 16
+#define	FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define	FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define	FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define	FR_AB_GMF_CFG2 0x00000f40
+#define	FRF_AB_GMF_CFGHWM_LBN 16
+#define	FRF_AB_GMF_CFGHWM_WIDTH 6
+#define	FRF_AB_GMF_CFGLWM_LBN 0
+#define	FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define	FR_AB_GMF_CFG3 0x00000f50
+#define	FRF_AB_GMF_CFGHWMFT_LBN 16
+#define	FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define	FRF_AB_GMF_CFGFTTH_LBN 0
+#define	FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define	FR_AB_GMF_CFG4 0x00000f60
+#define	FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define	FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define	FR_AB_GMF_CFG5 0x00000f70
+#define	FRF_AB_GMF_CFGHDPLX_LBN 22
+#define	FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define	FRF_AB_GMF_SRFULL_LBN 21
+#define	FRF_AB_GMF_SRFULL_WIDTH 1
+#define	FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define	FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define	FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define	FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define	FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define	FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define	FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define	FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
+#define	FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define	FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define	FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define	FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define	FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define	FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define	FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define	FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define	FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define	FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define	FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define	FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define	FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define	FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define	FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define	FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define	FR_AB_XM_ADR_LO 0x00001200
+#define	FRF_AB_XM_ADR_LO_LBN 0
+#define	FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define	FR_AB_XM_ADR_HI 0x00001210
+#define	FRF_AB_XM_ADR_HI_LBN 0
+#define	FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define	FR_AB_XM_GLB_CFG 0x00001220
+#define	FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define	FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define	FRF_AB_XM_DEBUG_MODE_LBN 16
+#define	FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define	FRF_AB_XM_RX_STAT_EN_LBN 11
+#define	FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define	FRF_AB_XM_TX_STAT_EN_LBN 10
+#define	FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define	FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define	FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define	FRF_AB_XM_WAN_MODE_LBN 5
+#define	FRF_AB_XM_WAN_MODE_WIDTH 1
+#define	FRF_AB_XM_INTCLR_MODE_LBN 3
+#define	FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define	FRF_AB_XM_CORE_RST_LBN 0
+#define	FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define	FR_AB_XM_TX_CFG 0x00001230
+#define	FRF_AB_XM_TX_PROG_LBN 24
+#define	FRF_AB_XM_TX_PROG_WIDTH 1
+#define	FRF_AB_XM_IPG_LBN 16
+#define	FRF_AB_XM_IPG_WIDTH 4
+#define	FRF_AB_XM_FCNTL_LBN 10
+#define	FRF_AB_XM_FCNTL_WIDTH 1
+#define	FRF_AB_XM_TXCRC_LBN 8
+#define	FRF_AB_XM_TXCRC_WIDTH 1
+#define	FRF_AB_XM_EDRC_LBN 6
+#define	FRF_AB_XM_EDRC_WIDTH 1
+#define	FRF_AB_XM_AUTO_PAD_LBN 5
+#define	FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define	FRF_AB_XM_TX_PRMBL_LBN 2
+#define	FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define	FRF_AB_XM_TXEN_LBN 1
+#define	FRF_AB_XM_TXEN_WIDTH 1
+#define	FRF_AB_XM_TX_RST_LBN 0
+#define	FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define	FR_AB_XM_RX_CFG 0x00001240
+#define	FRF_AB_XM_PASS_LENERR_LBN 26
+#define	FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define	FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define	FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define	FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define	FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_REJ_BCAST_LBN 20
+#define	FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define	FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define	FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define	FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define	FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define	FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define	FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define	FRF_AB_XM_RXCRC_LBN 3
+#define	FRF_AB_XM_RXCRC_WIDTH 1
+#define	FRF_AB_XM_RX_PRMBL_LBN 2
+#define	FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define	FRF_AB_XM_RXEN_LBN 1
+#define	FRF_AB_XM_RXEN_WIDTH 1
+#define	FRF_AB_XM_RX_RST_LBN 0
+#define	FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: XGMAC management interrupt mask */
+#define	FR_AB_XM_MGT_INT_MASK 0x00001250
+#define	FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define	FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define	FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define	FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define	FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define	FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define	FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define	FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define	FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define	FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define	FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define	FR_AB_XM_FC 0x00001270
+#define	FRF_AB_XM_PAUSE_TIME_LBN 16
+#define	FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define	FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define	FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define	FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define	FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define	FRF_AB_XM_MCNTL_PASS_LBN 8
+#define	FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define	FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define	FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define	FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define	FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define	FRF_AB_XM_ZPAUSE_LBN 2
+#define	FRF_AB_XM_ZPAUSE_WIDTH 1
+#define	FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define	FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define	FRF_AB_XM_DIS_FCNTL_LBN 0
+#define	FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define	FR_AB_XM_PAUSE_TIME 0x00001290
+#define	FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define	FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define	FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define	FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define	FR_AB_XM_TX_PARAM 0x000012d0
+#define	FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define	FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define	FRF_AB_XM_PAD_CHAR_LBN 0
+#define	FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define	FR_AB_XM_RX_PARAM 0x000012e0
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define	FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define	FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define	FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define	FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define	FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define	FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define	FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_RMTFLT_LBN 1
+#define	FRF_AB_XM_RMTFLT_WIDTH 1
+#define	FRF_AB_XM_LCLFLT_LBN 0
+#define	FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define	FR_AB_XX_PWR_RST 0x00001300
+#define	FRF_AB_XX_PWRDND_SIG_LBN 31
+#define	FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define	FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define	FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define	FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define	FRF_AB_XX_SIM_MODE_LBN 27
+#define	FRF_AB_XX_SIM_MODE_WIDTH 1
+#define	FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define	FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define	FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETD_SIG_LBN 23
+#define	FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETC_SIG_LBN 22
+#define	FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETB_SIG_LBN 21
+#define	FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETA_SIG_LBN 20
+#define	FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define	FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define	FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define	FRF_AB_XX_SD_RST_ACT_LBN 16
+#define	FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define	FRF_AB_XX_PWRDND_EN_LBN 15
+#define	FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNC_EN_LBN 14
+#define	FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNB_EN_LBN 13
+#define	FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNA_EN_LBN 12
+#define	FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define	FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define	FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define	FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define	FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define	FRF_AB_XX_RESETD_EN_LBN 7
+#define	FRF_AB_XX_RESETD_EN_WIDTH 1
+#define	FRF_AB_XX_RESETC_EN_LBN 6
+#define	FRF_AB_XX_RESETC_EN_WIDTH 1
+#define	FRF_AB_XX_RESETB_EN_LBN 5
+#define	FRF_AB_XX_RESETB_EN_WIDTH 1
+#define	FRF_AB_XX_RESETA_EN_LBN 4
+#define	FRF_AB_XX_RESETA_EN_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define	FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define	FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define	FRF_AB_XX_RST_XX_EN_LBN 0
+#define	FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define	FR_AB_XX_SD_CTL 0x00001310
+#define	FRF_AB_XX_TERMADJ1_LBN 17
+#define	FRF_AB_XX_TERMADJ1_WIDTH 1
+#define	FRF_AB_XX_TERMADJ0_LBN 16
+#define	FRF_AB_XX_TERMADJ0_WIDTH 1
+#define	FRF_AB_XX_HIDRVD_LBN 15
+#define	FRF_AB_XX_HIDRVD_WIDTH 1
+#define	FRF_AB_XX_LODRVD_LBN 14
+#define	FRF_AB_XX_LODRVD_WIDTH 1
+#define	FRF_AB_XX_HIDRVC_LBN 13
+#define	FRF_AB_XX_HIDRVC_WIDTH 1
+#define	FRF_AB_XX_LODRVC_LBN 12
+#define	FRF_AB_XX_LODRVC_WIDTH 1
+#define	FRF_AB_XX_HIDRVB_LBN 11
+#define	FRF_AB_XX_HIDRVB_WIDTH 1
+#define	FRF_AB_XX_LODRVB_LBN 10
+#define	FRF_AB_XX_LODRVB_WIDTH 1
+#define	FRF_AB_XX_HIDRVA_LBN 9
+#define	FRF_AB_XX_HIDRVA_WIDTH 1
+#define	FRF_AB_XX_LODRVA_LBN 8
+#define	FRF_AB_XX_LODRVA_WIDTH 1
+#define	FRF_AB_XX_LPBKD_LBN 3
+#define	FRF_AB_XX_LPBKD_WIDTH 1
+#define	FRF_AB_XX_LPBKC_LBN 2
+#define	FRF_AB_XX_LPBKC_WIDTH 1
+#define	FRF_AB_XX_LPBKB_LBN 1
+#define	FRF_AB_XX_LPBKB_WIDTH 1
+#define	FRF_AB_XX_LPBKA_LBN 0
+#define	FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define	FR_AB_XX_TXDRV_CTL 0x00001320
+#define	FRF_AB_XX_DEQD_LBN 28
+#define	FRF_AB_XX_DEQD_WIDTH 4
+#define	FRF_AB_XX_DEQC_LBN 24
+#define	FRF_AB_XX_DEQC_WIDTH 4
+#define	FRF_AB_XX_DEQB_LBN 20
+#define	FRF_AB_XX_DEQB_WIDTH 4
+#define	FRF_AB_XX_DEQA_LBN 16
+#define	FRF_AB_XX_DEQA_WIDTH 4
+#define	FRF_AB_XX_DTXD_LBN 12
+#define	FRF_AB_XX_DTXD_WIDTH 4
+#define	FRF_AB_XX_DTXC_LBN 8
+#define	FRF_AB_XX_DTXC_WIDTH 4
+#define	FRF_AB_XX_DTXB_LBN 4
+#define	FRF_AB_XX_DTXB_WIDTH 4
+#define	FRF_AB_XX_DTXA_LBN 0
+#define	FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: XAUI PRBS control register */
+#define	FR_AB_XX_PRBS_CTL 0x00001330
+#define	FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define	FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define	FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define	FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define	FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define	FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define	FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define	FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define	FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define	FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define	FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define	FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define	FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define	FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define	FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define	FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define	FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define	FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define	FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define	FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define	FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define	FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define	FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define	FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define	FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: XAUI PRBS checker register */
+#define	FR_AB_XX_PRBS_CHK 0x00001340
+#define	FRF_AB_XX_REV_LB_EN_LBN 16
+#define	FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define	FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define	FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define	FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define	FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define	FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define	FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define	FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define	FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define	FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define	FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define	FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define	FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define	FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define	FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define	FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define	FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define	FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: XAUI PRBS error count register */
+#define	FR_AB_XX_PRBS_ERR 0x00001350
+#define	FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define	FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define	FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define	FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define	FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define	FR_AB_XX_CORE_STAT 0x00001360
+#define	FRF_AB_XX_FORCE_SIG3_LBN 31
+#define	FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define	FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG2_LBN 29
+#define	FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define	FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG1_LBN 27
+#define	FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define	FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG0_LBN 25
+#define	FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define	FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define	FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define	FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define	FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define	FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define	FRF_AB_XX_MATCH_FAULT_LBN 21
+#define	FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define	FRF_AB_XX_ALIGN_DONE_LBN 20
+#define	FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT3_LBN 19
+#define	FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT2_LBN 18
+#define	FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT1_LBN 17
+#define	FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT0_LBN 16
+#define	FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define	FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define	FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define	FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define	FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define	FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define	FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define	FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define	FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define	FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define	FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define	FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define	FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH3_LBN 3
+#define	FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH2_LBN 2
+#define	FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH1_LBN 1
+#define	FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH0_LBN 0
+#define	FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define	FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define	FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define	FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define	FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define	FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define	FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define	FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define	FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define	FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define	FRF_AA_RX_RESET_LBN 89
+#define	FRF_AA_RX_RESET_WIDTH 1
+#define	FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define	FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define	FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define	FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define	FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define	FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define	FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define	FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define	FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define	FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define	FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define	FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define	FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define	FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define	FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define	FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define	FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define	FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define	FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define	FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define	FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define	FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define	FFE_AZ_RX_DESCQ_SIZE_512 0
+#define	FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define	FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define	FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define	FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define	FRF_AZ_RX_DESCQ_EN_LBN 0
+#define	FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
+#define	FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define	FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define	FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
+#define	FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define	FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define	FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define	FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define	FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define	FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define	FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define	FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define	FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define	FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define	FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define	FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define	FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define	FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define	FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define	FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define	FRF_AZ_TX_DESCQ_EN_LBN 88
+#define	FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define	FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define	FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define	FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define	FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define	FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define	FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define	FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define	FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define	FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define	FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define	FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define	FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define	FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define	FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define	FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define	FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define	FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define	FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define	FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define	FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define	FFE_AZ_TX_DESCQ_SIZE_512 0
+#define	FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define	FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define	FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define	FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define	FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define	FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define	FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define	FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define	FR_BZ_EVQ_PTR_TBL_STEP 16
+#define	FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define	FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define	FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define	FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define	FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define	FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define	FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define	FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define	FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define	FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define	FRF_AZ_EVQ_EN_LBN 23
+#define	FRF_AZ_EVQ_EN_WIDTH 1
+#define	FRF_AZ_EVQ_SIZE_LBN 20
+#define	FRF_AZ_EVQ_SIZE_WIDTH 3
+#define	FFE_AZ_EVQ_SIZE_32K 6
+#define	FFE_AZ_EVQ_SIZE_16K 5
+#define	FFE_AZ_EVQ_SIZE_8K 4
+#define	FFE_AZ_EVQ_SIZE_4K 3
+#define	FFE_AZ_EVQ_SIZE_2K 2
+#define	FFE_AZ_EVQ_SIZE_1K 1
+#define	FFE_AZ_EVQ_SIZE_512 0
+#define	FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define	FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode, direct access by driver */
+#define	FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define	FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define	FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half buffer table mode, direct access by driver */
+#define	FR_BZ_BUF_HALF_TBL 0x00800000
+#define	FR_BZ_BUF_HALF_TBL_STEP 8
+#define	FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define	FR_BB_BUF_HALF_TBL_ROWS 524288
+#define	FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define	FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define	FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define	FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode, direct access by driver */
+#define	FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define	FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define	FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full buffer table mode, direct access by driver */
+#define	FR_BZ_BUF_FULL_TBL 0x00800000
+#define	FR_BZ_BUF_FULL_TBL_STEP 8
+#define	FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define	FR_BB_BUF_FULL_TBL_ROWS 917504
+#define	FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define	FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define	FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define	FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define	FRF_AZ_BUF_ADR_REGION_LBN 48
+#define	FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define	FFE_AZ_BUF_ADR_REGN3 3
+#define	FFE_AZ_BUF_ADR_REGN2 2
+#define	FFE_AZ_BUF_ADR_REGN1 1
+#define	FFE_AZ_BUF_ADR_REGN0 0
+#define	FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define	FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define	FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define	FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define	FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define	FR_BZ_RX_FILTER_TBL0_STEP 32
+#define	FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define	FR_BB_RX_FILTER_TBL1 0x00f00010
+#define	FR_BB_RX_FILTER_TBL1_STEP 32
+#define	FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define	FRF_BZ_RSS_EN_LBN 110
+#define	FRF_BZ_RSS_EN_WIDTH 1
+#define	FRF_BZ_SCATTER_EN_LBN 109
+#define	FRF_BZ_SCATTER_EN_WIDTH 1
+#define	FRF_BZ_TCP_UDP_LBN 108
+#define	FRF_BZ_TCP_UDP_WIDTH 1
+#define	FRF_BZ_RXQ_ID_LBN 96
+#define	FRF_BZ_RXQ_ID_WIDTH 12
+#define	FRF_BZ_DEST_IP_LBN 64
+#define	FRF_BZ_DEST_IP_WIDTH 32
+#define	FRF_BZ_DEST_PORT_TCP_LBN 48
+#define	FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define	FRF_BZ_SRC_IP_LBN 16
+#define	FRF_BZ_SRC_IP_WIDTH 32
+#define	FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define	FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define	FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define	FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define	FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define	FRF_CZ_RMFT_RSS_EN_LBN 75
+#define	FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define	FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define	FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define	FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define	FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define	FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define	FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define	FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define	FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define	FRF_CZ_RMFT_DEST_MAC_LBN 16
+#define	FRF_CZ_RMFT_DEST_MAC_WIDTH 44
+#define	FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define	FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define	FR_BZ_TIMER_TBL 0x00f70000
+#define	FR_BZ_TIMER_TBL_STEP 16
+#define	FR_CZ_TIMER_TBL_ROWS 1024
+#define	FR_BB_TIMER_TBL_ROWS 4096
+#define	FRF_CZ_TIMER_Q_EN_LBN 33
+#define	FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define	FRF_CZ_INT_ARMD_LBN 32
+#define	FRF_CZ_INT_ARMD_WIDTH 1
+#define	FRF_CZ_INT_PEND_LBN 31
+#define	FRF_CZ_INT_PEND_WIDTH 1
+#define	FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define	FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define	FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define	FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define	FRF_CZ_TIMER_MODE_LBN 14
+#define	FRF_CZ_TIMER_MODE_WIDTH 2
+#define	FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define	FFE_CZ_TIMER_MODE_TRIG_START 2
+#define	FFE_CZ_TIMER_MODE_IMMED_START 1
+#define	FFE_CZ_TIMER_MODE_DIS 0
+#define	FRF_BB_TIMER_MODE_LBN 12
+#define	FRF_BB_TIMER_MODE_WIDTH 2
+#define	FFE_BB_TIMER_MODE_INT_HLDOFF 2
+#define	FFE_BB_TIMER_MODE_TRIG_START 2
+#define	FFE_BB_TIMER_MODE_IMMED_START 1
+#define	FFE_BB_TIMER_MODE_DIS 0
+#define	FRF_CZ_TIMER_VAL_LBN 0
+#define	FRF_CZ_TIMER_VAL_WIDTH 14
+#define	FRF_BB_TIMER_VAL_LBN 0
+#define	FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define	FR_BZ_TX_PACE_TBL 0x00f80000
+#define	FR_BZ_TX_PACE_TBL_STEP 16
+#define	FR_CZ_TX_PACE_TBL_ROWS 1024
+#define	FR_BB_TX_PACE_TBL_ROWS 4096
+#define	FRF_BZ_TX_PACE_LBN 0
+#define	FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define	FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define	FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define	FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define	FRF_BZ_IT_QUEUE_LBN 0
+#define	FRF_BZ_IT_QUEUE_WIDTH 6
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define	FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define	FR_CZ_TX_FILTER_TBL0_STEP 16
+#define	FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define	FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define	FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define	FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define	FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define	FRF_CZ_TIFT_DEST_IP_LBN 64
+#define	FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define	FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define	FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define	FRF_CZ_TIFT_SRC_IP_LBN 16
+#define	FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define	FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define	FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define	FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define	FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define	FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define	FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define	FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define	FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define	FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define	FRF_CZ_TMFT_SRC_MAC_LBN 16
+#define	FRF_CZ_TMFT_SRC_MAC_WIDTH 44
+#define	FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define	FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define	FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define	FR_CZ_MC_TREG_SMEM_STEP 4
+#define	FR_CZ_MC_TREG_SMEM_ROWS 512
+#define	FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define	FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define	FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define	FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define	FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define	FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define	FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define	FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define	FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define	FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define	FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define	FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define	FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define	FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define	FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define	FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define	FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define	FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define	FR_BZ_SRM_DBG 0x03000000
+#define	FR_BZ_SRM_DBG_STEP 8
+#define	FR_CZ_SRM_DBG_ROWS 262144
+#define	FR_BB_SRM_DBG_ROWS 2097152
+#define	FRF_BZ_SRM_DBG_LBN 0
+#define	FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define	FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define	FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define	FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define	FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define	FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define	FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define	FSE_BZ_TX_DSC_ERROR_EV 15
+#define	FSE_BZ_RX_DSC_ERROR_EV 14
+#define	FSE_AA_RX_RECOVER_EV 11
+#define	FSE_AZ_TIMER_EV 10
+#define	FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define	FSE_AZ_WAKE_UP_EV 6
+#define	FSE_AZ_SRM_UPD_DONE_EV 5
+#define	FSE_AB_EVQ_NOT_EN_EV 3
+#define	FSE_AZ_EVQ_INIT_DONE_EV 2
+#define	FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define	FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define	FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define	FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define	FSF_AZ_EV_CODE_LBN 60
+#define	FSF_AZ_EV_CODE_WIDTH 4
+#define	FSE_CZ_EV_CODE_MCDI_EV 12
+#define	FSE_CZ_EV_CODE_USER_EV 8
+#define	FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define	FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define	FSE_AZ_EV_CODE_DRIVER_EV 5
+#define	FSE_AZ_EV_CODE_TX_EV 2
+#define	FSE_AZ_EV_CODE_RX_EV 0
+#define	FSF_AZ_EV_DATA_LBN 0
+#define	FSF_AZ_EV_DATA_WIDTH 60
+
+/* GLOBAL_EV */
+#define	FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define	FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define	FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define	FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define	FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define	FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define	FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define	FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define	FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define	FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define	FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define	FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define	FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define	FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define	FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define	FSF_CZ_MC_XFRC_MODE_LBN 416
+#define	FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define	FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define	FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define	FSF_CZ_MC_XFRC_HASH_LBN 384
+#define	FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define	FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define	FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define	FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define	FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define	FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define	FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define	FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define	FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define	FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define	FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define	FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define	FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define	FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define	FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define	FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define	FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define	FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define	FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define	FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define	FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define	FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define	FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define	FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define	FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define	FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define	FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define	FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define	FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define	FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define	FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define	FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define	FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define	FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define	FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define	FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define	FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define	FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define	FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define	FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define	FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define	FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define	FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define	FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define	FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define	FSF_AZ_RX_EV_PORT_LBN 30
+#define	FSF_AZ_RX_EV_PORT_WIDTH 1
+#define	FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define	FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define	FSF_AZ_RX_EV_SOP_LBN 15
+#define	FSF_AZ_RX_EV_SOP_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define	FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define	FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define	FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define	FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define	FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define	FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define	FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define	FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define	FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define	FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define	FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define	FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define	FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define	FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define	FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define	FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define	FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define	FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define	FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define	FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define	FSF_AZ_TX_EV_PORT_LBN 16
+#define	FSF_AZ_TX_EV_PORT_WIDTH 1
+#define	FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define	FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define	FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define	FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define	FSF_AZ_TX_EV_COMP_LBN 12
+#define	FSF_AZ_TX_EV_COMP_WIDTH 1
+#define	FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define	FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define	FSF_AZ_TX_KER_CONT_LBN 62
+#define	FSF_AZ_TX_KER_CONT_WIDTH 1
+#define	FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define	FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define	FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define	FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define	FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define	FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define	FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define	FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define	FSF_AZ_TX_USER_CONT_LBN 46
+#define	FSF_AZ_TX_USER_CONT_WIDTH 1
+#define	FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define	FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define	FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define	FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define	FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define	FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define	FSF_CZ_USER_QID_LBN 32
+#define	FSF_CZ_USER_QID_WIDTH 10
+#define	FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define	FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define	FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define	FRF_AZ_SRM_NB_SZ_WIDTH \
+	(FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define	FR_AZ_RX_DESC_UPD_DWORD_P0 \
+	(BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+	 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define	FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define	FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+	(BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+	 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define	FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define	FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
+
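The two *_DWORD pseudo-registers above capture the shortcut spelled out in the comment: the descriptor write pointer lives entirely in the top 32-bit dword of the 128-bit update register, so the driver can post it with one dword write at byte offset +12 and a bit position rebased by 96. Here is a stand-alone sketch of just that arithmetic; the base address and LBN passed in are placeholders, not the real FR_BZ_*/FRF_AZ_* values, which are defined earlier in this header rather than in this hunk.

#include <stdint.h>
#include <stdio.h>

/* base/lbn stand in for FR_BZ_RX_DESC_UPD_P0 / FRF_AZ_RX_DESC_WPTR_LBN. */
static void rebase_to_top_dword(uint32_t base, unsigned int lbn,
				uint32_t *dword_addr, unsigned int *dword_lbn)
{
	*dword_addr = base + 3 * 4;	/* byte offset of dword 3 of 4 */
	*dword_lbn  = lbn - 3 * 32;	/* bit position within that dword */
}

int main(void)
{
	uint32_t addr;
	unsigned int lbn;

	rebase_to_top_dword(0x1000, 96, &addr, &lbn);	/* placeholder inputs */
	printf("single dword write at %#x, field starts at bit %u\n", addr, lbn);
	return 0;
}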
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+					 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+					 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0  /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define	FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define	FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define	FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define	FRF_AB_XX_COMMA_DET_WIDTH 4
+#define	FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define	FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define	FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define	FRF_AB_XX_DISPERR_WIDTH 4
+#define	FFE_AB_XX_STAT_ALL_LANES 0xf
+#define	FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define	FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define	FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
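The all-lanes pseudo-fields collapse the four per-channel XX_CORE_STAT bits into one 4-bit value, with FFE_AB_XX_STAT_ALL_LANES as the "every lane OK" pattern. A small sketch of the kind of check this enables, using only the constants above; reading the register as a plain uint32_t and the sample value are simplifications, since the driver goes through its oword accessors.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FRF_AB_XX_SYNC_STAT_LBN 16	/* == FRF_AB_XX_SYNC_STAT0_LBN */
#define FRF_AB_XX_SYNC_STAT_WIDTH 4
#define FFE_AB_XX_STAT_ALL_LANES 0xf

static bool xaui_all_lanes_synced(uint32_t xx_core_stat)
{
	uint32_t sync = (xx_core_stat >> FRF_AB_XX_SYNC_STAT_LBN) &
			((1u << FRF_AB_XX_SYNC_STAT_WIDTH) - 1);

	return sync == FFE_AB_XX_STAT_ALL_LANES;
}

int main(void)
{
	printf("%d\n", xaui_all_lanes_synced(0x000f0000));	/* made-up value */
	return 0;
}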
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ *
+ */
+
+#define GRxGoodOct_offset 0x0
+#define GRxGoodOct_WIDTH 48
+#define GRxBadOct_offset 0x8
+#define GRxBadOct_WIDTH 48
+#define GRxMissPkt_offset 0x10
+#define GRxMissPkt_WIDTH 32
+#define GRxFalseCRS_offset 0x14
+#define GRxFalseCRS_WIDTH 32
+#define GRxPausePkt_offset 0x18
+#define GRxPausePkt_WIDTH 32
+#define GRxBadPkt_offset 0x1C
+#define GRxBadPkt_WIDTH 32
+#define GRxUcastPkt_offset 0x20
+#define GRxUcastPkt_WIDTH 32
+#define GRxMcastPkt_offset 0x24
+#define GRxMcastPkt_WIDTH 32
+#define GRxBcastPkt_offset 0x28
+#define GRxBcastPkt_WIDTH 32
+#define GRxGoodLt64Pkt_offset 0x2C
+#define GRxGoodLt64Pkt_WIDTH 32
+#define GRxBadLt64Pkt_offset 0x30
+#define GRxBadLt64Pkt_WIDTH 32
+#define GRx64Pkt_offset 0x34
+#define GRx64Pkt_WIDTH 32
+#define GRx65to127Pkt_offset 0x38
+#define GRx65to127Pkt_WIDTH 32
+#define GRx128to255Pkt_offset 0x3C
+#define GRx128to255Pkt_WIDTH 32
+#define GRx256to511Pkt_offset 0x40
+#define GRx256to511Pkt_WIDTH 32
+#define GRx512to1023Pkt_offset 0x44
+#define GRx512to1023Pkt_WIDTH 32
+#define GRx1024to15xxPkt_offset 0x48
+#define GRx1024to15xxPkt_WIDTH 32
+#define GRx15xxtoJumboPkt_offset 0x4C
+#define GRx15xxtoJumboPkt_WIDTH 32
+#define GRxGtJumboPkt_offset 0x50
+#define GRxGtJumboPkt_WIDTH 32
+#define GRxFcsErr64to15xxPkt_offset 0x54
+#define GRxFcsErr64to15xxPkt_WIDTH 32
+#define GRxFcsErr15xxtoJumboPkt_offset 0x58
+#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
+#define GRxFcsErrGtJumboPkt_offset 0x5C
+#define GRxFcsErrGtJumboPkt_WIDTH 32
+#define GTxGoodBadOct_offset 0x80
+#define GTxGoodBadOct_WIDTH 48
+#define GTxGoodOct_offset 0x88
+#define GTxGoodOct_WIDTH 48
+#define GTxSglColPkt_offset 0x90
+#define GTxSglColPkt_WIDTH 32
+#define GTxMultColPkt_offset 0x94
+#define GTxMultColPkt_WIDTH 32
+#define GTxExColPkt_offset 0x98
+#define GTxExColPkt_WIDTH 32
+#define GTxDefPkt_offset 0x9C
+#define GTxDefPkt_WIDTH 32
+#define GTxLateCol_offset 0xA0
+#define GTxLateCol_WIDTH 32
+#define GTxExDefPkt_offset 0xA4
+#define GTxExDefPkt_WIDTH 32
+#define GTxPausePkt_offset 0xA8
+#define GTxPausePkt_WIDTH 32
+#define GTxBadPkt_offset 0xAC
+#define GTxBadPkt_WIDTH 32
+#define GTxUcastPkt_offset 0xB0
+#define GTxUcastPkt_WIDTH 32
+#define GTxMcastPkt_offset 0xB4
+#define GTxMcastPkt_WIDTH 32
+#define GTxBcastPkt_offset 0xB8
+#define GTxBcastPkt_WIDTH 32
+#define GTxLt64Pkt_offset 0xBC
+#define GTxLt64Pkt_WIDTH 32
+#define GTx64Pkt_offset 0xC0
+#define GTx64Pkt_WIDTH 32
+#define GTx65to127Pkt_offset 0xC4
+#define GTx65to127Pkt_WIDTH 32
+#define GTx128to255Pkt_offset 0xC8
+#define GTx128to255Pkt_WIDTH 32
+#define GTx256to511Pkt_offset 0xCC
+#define GTx256to511Pkt_WIDTH 32
+#define GTx512to1023Pkt_offset 0xD0
+#define GTx512to1023Pkt_WIDTH 32
+#define GTx1024to15xxPkt_offset 0xD4
+#define GTx1024to15xxPkt_WIDTH 32
+#define GTx15xxtoJumboPkt_offset 0xD8
+#define GTx15xxtoJumboPkt_WIDTH 32
+#define GTxGtJumboPkt_offset 0xDC
+#define GTxGtJumboPkt_WIDTH 32
+#define GTxNonTcpUdpPkt_offset 0xE0
+#define GTxNonTcpUdpPkt_WIDTH 16
+#define GTxMacSrcErrPkt_offset 0xE4
+#define GTxMacSrcErrPkt_WIDTH 16
+#define GTxIpSrcErrPkt_offset 0xE8
+#define GTxIpSrcErrPkt_WIDTH 16
+#define GDmaDone_offset 0xEC
+#define GDmaDone_WIDTH 32
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_STATS_NOT_DONE 0x00000000
+#define FALCON_STATS_DONE 0xffffffff
+
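These *_offset/*_WIDTH pairs give the byte offset and bit width of each counter in the MAC statistics block that the MAC DMAs to host memory, and the NOT_DONE/DONE values appear to be the completion marker written into the GDmaDone/XgDmaDone slot. A rough sketch of pulling one XMAC counter out of such a snapshot follows; the buffer pointer, the completion handshake details, and the little-endian assumption are mine, not taken from this header.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH 48
#define XgDmaDone_offset 0xD4
#define FALCON_STATS_DONE 0xffffffff

/* buf points at a snapshot of the XMAC stats DMA buffer (assumed layout). */
static int read_xgrx_octets(const uint8_t *buf, uint64_t *value)
{
	uint32_t done;
	uint64_t raw = 0;

	memcpy(&done, buf + XgDmaDone_offset, sizeof(done));
	if (done != FALCON_STATS_DONE)
		return -1;		/* stats DMA not complete yet */

	memcpy(&raw, buf + XgRxOctets_offset, XgRxOctets_WIDTH / 8);
	*value = raw;			/* little-endian host assumed */
	return 0;
}

int main(void)
{
	uint8_t buf[0xD8] = { 0 };	/* made-up snapshot */
	uint64_t octets;

	memset(buf + XgDmaDone_offset, 0xff, 4);
	buf[0] = 0x40;			/* 64 octets received */
	if (read_xgrx_octets(buf, &octets) == 0)
		printf("XgRxOctets = %llu\n", (unsigned long long)octets);
	return 0;
}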
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+	__le16 nports;
+	u8 port0_phy_addr;
+	u8 port0_phy_type;
+	u8 port1_phy_addr;
+	u8 port1_phy_type;
+	__le16 asic_sub_revision;
+	__le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+	__le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field)					\
+	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
+
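SPI_DEV_TYPE_FIELD() extracts one of the sub-fields above from a packed spi_device_type word; EFX_LOW_BIT/EFX_MASK32/EFX_WIDTH presumably resolve the field name to its _LBN and _WIDTH constants. An open-coded equivalent for one field, purely illustrative:

	/* Illustrative open-coded equivalent of
	 * SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ERASE_CMD), assuming
	 * EFX_LOW_BIT/EFX_WIDTH map to the _LBN/_WIDTH values above. */
	static unsigned int spi_dev_type_erase_cmd(u32 type)
	{
		return (type >> SPI_DEV_TYPE_ERASE_CMD_LBN) &
		       ((1u << SPI_DEV_TYPE_ERASE_CMD_WIDTH) - 1);
	}

The 5-bit SIZE, ERASE_SIZE and BLOCK_SIZE fields cannot hold byte counts directly, so they are presumably log2-encoded (a field value of n meaning 2^n bytes) — an inference, not something stated in this header.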
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
+	u8 mac_address[2][8];			/* 0x310 */
+	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
+	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
+	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
+	efx_oword_t hw_init_reg;			/* 0x350 */
+	efx_oword_t nic_stat_reg;			/* 0x360 */
+	efx_oword_t glb_ctl_reg;			/* 0x370 */
+	efx_oword_t srm_cfg_reg;			/* 0x380 */
+	efx_oword_t spare_reg;				/* 0x390 */
+	__le16 board_magic_num;			/* 0x3A0 */
+	__le16 board_struct_ver;
+	__le16 board_checksum;
+	struct falcon_nvconfig_board_v2 board_v2;
+	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
+	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
+} __packed;
+
+#endif /* EFX_REGS_H */
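Before trusting the board section of falcon_nvconfig, a driver would typically check the magic number and structure version read back from NVRAM at FALCON_NVCONFIG_OFFSET. A minimal sketch — the helper name is hypothetical, and the checksum verification over board_checksum is omitted here:

	/* Hypothetical validity check on a falcon_nvconfig read from NVRAM. */
	static bool falcon_nvconfig_board_valid(const struct falcon_nvconfig *cfg)
	{
		if (le16_to_cpu(cfg->board_magic_num) !=
		    FALCON_NVCONFIG_BOARD_MAGIC_NUM)
			return false;
		/* v1 layouts are obsolete; board_v2 is meaningful from
		 * version 2 and board_v3 only from version 3 onwards. */
		return le16_to_cpu(cfg->board_struct_ver) >= 2;
	}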
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 98bff5ada09a..a97c923b560c 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2008 Solarflare Communications Inc.
+ * Copyright 2005-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -16,9 +16,8 @@
 #include <net/ip.h>
 #include <net/checksum.h>
 #include "net_driver.h"
-#include "rx.h"
 #include "efx.h"
-#include "falcon.h"
+#include "nic.h"
 #include "selftest.h"
 #include "workarounds.h"
 
@@ -61,7 +60,7 @@
  *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
  *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
  */
-static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
+static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 
 #define RX_ALLOC_LEVEL_LRO 0x2000
 #define RX_ALLOC_LEVEL_MAX 0x3000
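With the default now RX_ALLOC_METHOD_AUTO, the choice between page- and skb-based receive buffers follows the running rx_alloc_level score, per the formula in the comment above. A sketch of that decision (the clamp to RX_ALLOC_LEVEL_MAX is an assumption; the real logic presumably lives in efx_rx_strategy()):

	/* Sketch of the auto allocation strategy; RX_ALLOC_METHOD_* and
	 * RX_ALLOC_LEVEL_* are the constants above, the clamp is assumed. */
	static int rx_alloc_method_for_level(int rx_alloc_level)
	{
		if (rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			rx_alloc_level = RX_ALLOC_LEVEL_MAX;
		return (rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
			RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB;
	}

The GRO completion path later in this patch feeds the score: rx_alloc_level is adjusted by RX_ALLOC_FACTOR_SKB when GRO passes a packet up as GRO_NORMAL and by RX_ALLOC_FACTOR_LRO when it is merged, steering future buffer allocations accordingly.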
@@ -293,8 +292,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 	 * fill anyway.
 	 */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 
 	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +314,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  retry:
 	/* Recalculate current fill level now that we have the lock */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
 		goto out_unlock;
@@ -329,8 +326,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
 	do {
 		for (i = 0; i < EFX_RX_BATCH; ++i) {
-			index = (rx_queue->added_count &
-				 rx_queue->efx->type->rxd_ring_mask);
+			index = rx_queue->added_count & EFX_RXQ_MASK;
 			rx_buf = efx_rx_buffer(rx_queue, index);
 			rc = efx_init_rx_buffer(rx_queue, rx_buf);
 			if (unlikely(rc))
@@ -345,7 +341,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
  out:
 	/* Send write pointer to card. */
-	falcon_notify_rx_desc(rx_queue);
+	efx_nic_notify_rx_desc(rx_queue);
 
 	/* If the fast fill is running inside from the refill tasklet, then
 	 * for SMP systems it may be running on a different CPU to
@@ -448,17 +444,23 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
 			      bool checksummed)
 {
 	struct napi_struct *napi = &channel->napi_str;
+	gro_result_t gro_result;
 
 	/* Pass the skb/page into the LRO engine */
 	if (rx_buf->page) {
-		struct sk_buff *skb = napi_get_frags(napi);
+		struct page *page = rx_buf->page;
+		struct sk_buff *skb;
 
+		EFX_BUG_ON_PARANOID(rx_buf->skb);
+		rx_buf->page = NULL;
+
+		skb = napi_get_frags(napi);
 		if (!skb) {
-			put_page(rx_buf->page);
-			goto out;
+			put_page(page);
+			return;
 		}
 
-		skb_shinfo(skb)->frags[0].page = rx_buf->page;
+		skb_shinfo(skb)->frags[0].page = page;
 		skb_shinfo(skb)->frags[0].page_offset =
 			efx_rx_buf_offset(rx_buf);
 		skb_shinfo(skb)->frags[0].size = rx_buf->len;
@@ -470,17 +472,24 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
 		skb->ip_summed =
 			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 
-		napi_gro_frags(napi);
+		skb_record_rx_queue(skb, channel->channel);
 
-out:
-		EFX_BUG_ON_PARANOID(rx_buf->skb);
-		rx_buf->page = NULL;
+		gro_result = napi_gro_frags(napi);
 	} else {
-		EFX_BUG_ON_PARANOID(!rx_buf->skb);
-		EFX_BUG_ON_PARANOID(!checksummed);
+		struct sk_buff *skb = rx_buf->skb;
 
-		napi_gro_receive(napi, rx_buf->skb);
+		EFX_BUG_ON_PARANOID(!skb);
+		EFX_BUG_ON_PARANOID(!checksummed);
 		rx_buf->skb = NULL;
+
+		gro_result = napi_gro_receive(napi, skb);
+	}
+
+	if (gro_result == GRO_NORMAL) {
+		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+	} else if (gro_result != GRO_DROP) {
+		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+		channel->irq_mod_score += 2;
 	}
 }
 
@@ -558,7 +567,7 @@ void __efx_rx_packet(struct efx_channel *channel,
 	if (unlikely(efx->loopback_selftest)) {
 		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
 		efx_free_rx_buffer(efx, rx_buf);
-		goto done;
+		return;
 	}
 
 	if (rx_buf->skb) {
@@ -570,34 +579,28 @@ void __efx_rx_packet(struct efx_channel *channel,
 		 * at the ethernet header */
 		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
 						       efx->net_dev);
+
+		skb_record_rx_queue(rx_buf->skb, channel->channel);
 	}
 
 	if (likely(checksummed || rx_buf->page)) {
 		efx_rx_packet_lro(channel, rx_buf, checksummed);
-		goto done;
+		return;
 	}
 
 	/* We now own the SKB */
 	skb = rx_buf->skb;
 	rx_buf->skb = NULL;
-
-	EFX_BUG_ON_PARANOID(rx_buf->page);
-	EFX_BUG_ON_PARANOID(rx_buf->skb);
 	EFX_BUG_ON_PARANOID(!skb);
 
 	/* Set the SKB flags */
 	skb->ip_summed = CHECKSUM_NONE;
 
-	skb_record_rx_queue(skb, channel->channel);
-
 	/* Pass the packet up */
 	netif_receive_skb(skb);
 
 	/* Update allocation strategy method */
 	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-
-done:
-	;
 }
 
 void efx_rx_strategy(struct efx_channel *channel)
@@ -632,12 +635,12 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
 
 	/* Allocate RX buffers */
-	rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
 	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
 
-	rc = falcon_probe_rx(rx_queue);
+	rc = efx_nic_probe_rx(rx_queue);
 	if (rc) {
 		kfree(rx_queue->buffer);
 		rx_queue->buffer = NULL;
@@ -647,7 +650,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
-	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
 
 	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -660,7 +662,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->min_overfill = -1U;
 
 	/* Initialise limit fields */
-	max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -669,7 +671,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->fast_fill_limit = limit;
 
 	/* Set up RX descriptor ring */
-	falcon_init_rx(rx_queue);
+	efx_nic_init_rx(rx_queue);
 }
 
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -679,11 +681,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 
-	falcon_fini_rx(rx_queue);
+	efx_nic_fini_rx(rx_queue);
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	if (rx_queue->buffer) {
-		for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+		for (i = 0; i <= EFX_RXQ_MASK; i++) {
 			rx_buf = efx_rx_buffer(rx_queue, i);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
@@ -704,7 +706,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
 
-	falcon_remove_rx(rx_queue);
+	efx_nic_remove_rx(rx_queue);
 
 	kfree(rx_queue->buffer);
 	rx_queue->buffer = NULL;
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
deleted file mode 100644
index 42ee7555a80b..000000000000
--- a/drivers/net/sfc/rx.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_RX_H
-#define EFX_RX_H
-
-#include "net_driver.h"
-
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-
-void efx_rx_strategy(struct efx_channel *channel);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-void efx_rx_work(struct work_struct *data);
-void __efx_rx_packet(struct efx_channel *channel,
-		     struct efx_rx_buffer *rx_buf, bool checksummed);
-
-#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 817c7efc11e0..14949bb303a0 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -20,14 +20,12 @@
 #include <linux/rtnetlink.h>
 #include <asm/io.h>
 #include "net_driver.h"
-#include "ethtool.h"
 #include "efx.h"
-#include "falcon.h"
+#include "nic.h"
 #include "selftest.h"
-#include "boards.h"
 #include "workarounds.h"
 #include "spi.h"
-#include "falcon_io.h"
+#include "io.h"
 #include "mdio_10g.h"
 
 /*
@@ -57,6 +55,7 @@ static const char *payload_msg =
  * @flush:		Drop all packets in efx_loopback_rx_packet
  * @packet_count:	Number of packets being used in this test
  * @skbs:		An array of skbs transmitted
+ * @offload_csum:	Checksums are being offloaded
  * @rx_good:		RX good packet count
  * @rx_bad:		RX bad packet count
  * @payload:		Payload used in tests
@@ -65,10 +64,7 @@ struct efx_loopback_state {
 	bool flush;
 	int packet_count;
 	struct sk_buff **skbs;
-
-	/* Checksums are being offloaded */
 	bool offload_csum;
-
 	atomic_t rx_good;
 	atomic_t rx_bad;
 	struct efx_loopback_payload payload;
@@ -104,7 +100,7 @@ static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
 	}
 
 	if (EFX_IS10G(efx)) {
-		rc = efx_mdio_check_mmds(efx, efx->phy_op->mmds, 0);
+		rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
 		if (rc)
 			goto out;
 	}
@@ -117,23 +113,26 @@ out:
 
 static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-	int rc;
+	int rc = 0;
+
+	if (efx->type->test_nvram) {
+		rc = efx->type->test_nvram(efx);
+		tests->nvram = rc ? -1 : 1;
+	}
 
-	rc = falcon_read_nvram(efx, NULL);
-	tests->nvram = rc ? -1 : 1;
 	return rc;
 }
 
 static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-	int rc;
+	int rc = 0;
 
-	/* Not supported on A-series silicon */
-	if (falcon_rev(efx) < FALCON_REV_B0)
-		return 0;
+	/* Test register access */
+	if (efx->type->test_registers) {
+		rc = efx->type->test_registers(efx);
+		tests->registers = rc ? -1 : 1;
+	}
 
-	rc = falcon_test_registers(efx);
-	tests->registers = rc ? -1 : 1;
 	return rc;
 }
 
@@ -165,7 +164,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
 			goto success;
 	}
 
-	falcon_generate_interrupt(efx);
+	efx_nic_generate_interrupt(efx);
 
 	/* Wait for arrival of test interrupt. */
 	EFX_LOG(efx, "waiting for test interrupt\n");
@@ -177,8 +176,8 @@ static int efx_test_interrupts(struct efx_nic *efx,
 	return -ETIMEDOUT;
 
  success:
-	EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n",
-		efx->interrupt_mode, efx->last_irq_cpu);
+	EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
+		efx->last_irq_cpu);
 	tests->interrupt = 1;
 	return 0;
 }
@@ -203,7 +202,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	channel->eventq_magic = 0;
 	smp_wmb();
 
-	falcon_generate_test_event(channel, magic);
+	efx_nic_generate_test_event(channel, magic);
 
 	/* Wait for arrival of interrupt */
 	count = 0;
@@ -254,9 +253,6 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
 	if (!efx->phy_op->run_tests)
 		return 0;
 
-	EFX_BUG_ON_PARANOID(efx->phy_op->num_tests == 0 ||
-			    efx->phy_op->num_tests > EFX_MAX_PHY_TESTS);
-
 	mutex_lock(&efx->mac_lock);
 	rc = efx->phy_op->run_tests(efx, tests->phy, flags);
 	mutex_unlock(&efx->mac_lock);
@@ -426,7 +422,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 
 		if (efx_dev_registered(efx))
 			netif_tx_lock_bh(efx->net_dev);
-		rc = efx_xmit(efx, tx_queue, skb);
+		rc = efx_enqueue_skb(tx_queue, skb);
 		if (efx_dev_registered(efx))
 			netif_tx_unlock_bh(efx->net_dev);
 
@@ -439,7 +435,6 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 			kfree_skb(skb);
 			return -EPIPE;
 		}
-		efx->net_dev->trans_start = jiffies;
 	}
 
 	return 0;
@@ -527,7 +522,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 
 	for (i = 0; i < 3; i++) {
 		/* Determine how many packets to send */
-		state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
+		state->packet_count = EFX_TXQ_SIZE / 3;
 		state->packet_count = min(1 << (i << 2), state->packet_count);
 		state->skbs = kzalloc(sizeof(state->skbs[0]) *
 				      state->packet_count, GFP_KERNEL);
@@ -568,14 +563,49 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 	return 0;
 }
 
+/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
+ * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
+ * to delay and retry. Therefore, it's safer to just poll directly. Wait
+ * for link up and any faults to dissipate. */
+static int efx_wait_for_link(struct efx_nic *efx)
+{
+	struct efx_link_state *link_state = &efx->link_state;
+	int count;
+	bool link_up;
+
+	for (count = 0; count < 40; count++) {
+		schedule_timeout_uninterruptible(HZ / 10);
+
+		if (efx->type->monitor != NULL) {
+			mutex_lock(&efx->mac_lock);
+			efx->type->monitor(efx);
+			mutex_unlock(&efx->mac_lock);
+		} else {
+			struct efx_channel *channel = &efx->channel[0];
+			if (channel->work_pending)
+				efx_process_channel_now(channel);
+		}
+
+		mutex_lock(&efx->mac_lock);
+		link_up = link_state->up;
+		if (link_up)
+			link_up = !efx->mac_op->check_fault(efx);
+		mutex_unlock(&efx->mac_lock);
+
+		if (link_up)
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
 static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 			      unsigned int loopback_modes)
 {
 	enum efx_loopback_mode mode;
 	struct efx_loopback_state *state;
 	struct efx_tx_queue *tx_queue;
-	bool link_up;
-	int count, rc = 0;
+	int rc = 0;
 
 	/* Set the port loopback_selftest member. From this point on
 	 * all received packets will be dropped. Mark the state as
@@ -594,46 +624,23 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 
 		/* Move the port into the specified loopback mode. */
 		state->flush = true;
+		mutex_lock(&efx->mac_lock);
 		efx->loopback_mode = mode;
-		efx_reconfigure_port(efx);
-
-		/* Wait for the PHY to signal the link is up. Interrupts
-		 * are enabled for PHY's using LASI, otherwise we poll()
-		 * quickly */
-		count = 0;
-		do {
-			struct efx_channel *channel = &efx->channel[0];
+		rc = __efx_reconfigure_port(efx);
+		mutex_unlock(&efx->mac_lock);
+		if (rc) {
+			EFX_ERR(efx, "unable to move into %s loopback\n",
+				LOOPBACK_MODE(efx));
+			goto out;
+		}
 
-			efx->phy_op->poll(efx);
-			schedule_timeout_uninterruptible(HZ / 10);
-			if (channel->work_pending)
-				efx_process_channel_now(channel);
-			/* Wait for PHY events to be processed */
-			flush_workqueue(efx->workqueue);
-			rmb();
-
-			/* We need both the phy and xaui links to be ok.
-			 * rather than relying on the falcon_xmac irq/poll
-			 * regime, just poll xaui directly */
-			link_up = efx->link_up;
-			if (link_up && EFX_IS10G(efx) &&
-			    !falcon_xaui_link_ok(efx))
-				link_up = false;
-
-		} while ((++count < 20) && !link_up);
-
-		/* The link should now be up. If it isn't, there is no point
-		 * in attempting a loopback test */
-		if (!link_up) {
+		rc = efx_wait_for_link(efx);
+		if (rc) {
 			EFX_ERR(efx, "loopback %s never came up\n",
 				LOOPBACK_MODE(efx));
-			rc = -EIO;
 			goto out;
 		}
 
-		EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
-			LOOPBACK_MODE(efx), count);
-
 		/* Test every TX queue */
 		efx_for_each_tx_queue(tx_queue, efx) {
 			state->offload_csum = (tx_queue->queue ==
@@ -667,7 +674,6 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
 	int phy_mode = efx->phy_mode;
 	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
-	struct ethtool_cmd ecmd;
 	struct efx_channel *channel;
 	int rc_test = 0, rc_reset = 0, rc;
 
@@ -720,21 +726,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	mutex_unlock(&efx->mac_lock);
 
 	/* free up all consumers of SRAM (including all the queues) */
-	efx_reset_down(efx, reset_method, &ecmd);
+	efx_reset_down(efx, reset_method);
 
 	rc = efx_test_chip(efx, tests);
 	if (rc && !rc_test)
 		rc_test = rc;
 
 	/* reset the chip to recover from the register test */
-	rc_reset = falcon_reset_hw(efx, reset_method);
+	rc_reset = efx->type->reset(efx, reset_method);
 
 	/* Ensure that the phy is powered and out of loopback
 	 * for the bist and loopback tests */
 	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
 	efx->loopback_mode = LOOPBACK_NONE;
 
-	rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
+	rc = efx_reset_up(efx, reset_method, rc_reset == 0);
 	if (rc && !rc_reset)
 		rc_reset = rc;
 
@@ -753,10 +759,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 		rc_test = rc;
 
 	/* restore the PHY to the previous state */
-	efx->loopback_mode = loopback_mode;
+	mutex_lock(&efx->mac_lock);
 	efx->phy_mode = phy_mode;
 	efx->port_inhibited = false;
-	efx_ethtool_set_settings(efx->net_dev, &ecmd);
+	efx->loopback_mode = loopback_mode;
+	__efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
 
 	return rc_test;
 }
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
deleted file mode 100644
index 49eb91b5f50c..000000000000
--- a/drivers/net/sfc/sfe4001.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-/*****************************************************************************
- * Support for the SFE4001 and SFN4111T NICs.
- *
- * The SFE4001 does not power-up fully at reset due to its high power
- * consumption.  We control its power via a PCA9539 I/O expander.
- * Both boards have a MAX6647 temperature monitor which we expose to
- * the lm90 driver.
- *
- * This also provides minimal support for reflashing the PHY, which is
- * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
- * On SFE4001 rev A2 and later this is connected to the 3V3X output of
- * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
- * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
- * exclusive with the network device being open.
- */
-
-#include <linux/delay.h>
-#include <linux/rtnetlink.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "phy.h"
-#include "boards.h"
-#include "falcon.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
-#include "mac.h"
-#include "workarounds.h"
-
-/**************************************************************************
- *
- * I2C IO Expander device
- *
- **************************************************************************/
-#define	PCA9539 0x74
-
-#define	P0_IN 0x00
-#define	P0_OUT 0x02
-#define	P0_INVERT 0x04
-#define	P0_CONFIG 0x06
-
-#define	P0_EN_1V0X_LBN 0
-#define	P0_EN_1V0X_WIDTH 1
-#define	P0_EN_1V2_LBN 1
-#define	P0_EN_1V2_WIDTH 1
-#define	P0_EN_2V5_LBN 2
-#define	P0_EN_2V5_WIDTH 1
-#define	P0_EN_3V3X_LBN 3
-#define	P0_EN_3V3X_WIDTH 1
-#define	P0_EN_5V_LBN 4
-#define	P0_EN_5V_WIDTH 1
-#define	P0_SHORTEN_JTAG_LBN 5
-#define	P0_SHORTEN_JTAG_WIDTH 1
-#define	P0_X_TRST_LBN 6
-#define	P0_X_TRST_WIDTH 1
-#define	P0_DSP_RESET_LBN 7
-#define	P0_DSP_RESET_WIDTH 1
-
-#define	P1_IN 0x01
-#define	P1_OUT 0x03
-#define	P1_INVERT 0x05
-#define	P1_CONFIG 0x07
-
-#define	P1_AFE_PWD_LBN 0
-#define	P1_AFE_PWD_WIDTH 1
-#define	P1_DSP_PWD25_LBN 1
-#define	P1_DSP_PWD25_WIDTH 1
-#define	P1_RESERVED_LBN 2
-#define	P1_RESERVED_WIDTH 2
-#define	P1_SPARE_LBN 4
-#define	P1_SPARE_WIDTH 4
-
-/* Temperature Sensor */
-#define MAX664X_REG_RSL		0x02
-#define MAX664X_REG_WLHO	0x0B
-
-static void sfe4001_poweroff(struct efx_nic *efx)
-{
-	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
-	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
-
-	/* Turn off all power rails and disable outputs */
-	i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
-	i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
-	i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
-
-	/* Clear any over-temperature alert */
-	i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
-}
-
-static int sfe4001_poweron(struct efx_nic *efx)
-{
-	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
-	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
-	unsigned int i, j;
-	int rc;
-	u8 out;
-
-	/* Clear any previous over-temperature alert */
-	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
-	if (rc < 0)
-		return rc;
-
-	/* Enable port 0 and port 1 outputs on IO expander */
-	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
-	if (rc)
-		return rc;
-	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
-				       0xff & ~(1 << P1_SPARE_LBN));
-	if (rc)
-		goto fail_on;
-
-	/* If PHY power is on, turn it all off and wait 1 second to
-	 * ensure a full reset.
-	 */
-	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
-	if (rc < 0)
-		goto fail_on;
-	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
-		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
-		       (0 << P0_EN_1V0X_LBN));
-	if (rc != out) {
-		EFX_INFO(efx, "power-cycling PHY\n");
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-		schedule_timeout_uninterruptible(HZ);
-	}
-
-	for (i = 0; i < 20; ++i) {
-		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
-		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
-			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
-			       (1 << P0_X_TRST_LBN));
-		if (efx->phy_mode & PHY_MODE_SPECIAL)
-			out |= 1 << P0_EN_3V3X_LBN;
-
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-		msleep(10);
-
-		/* Turn on 1V power rail */
-		out &= ~(1 << P0_EN_1V0X_LBN);
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-
-		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
-
-		/* In flash config mode, DSP does not turn on AFE, so
-		 * just wait 1 second.
-		 */
-		if (efx->phy_mode & PHY_MODE_SPECIAL) {
-			schedule_timeout_uninterruptible(HZ);
-			return 0;
-		}
-
-		for (j = 0; j < 10; ++j) {
-			msleep(100);
-
-			/* Check DSP has asserted AFE power line */
-			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
-			if (rc < 0)
-				goto fail_on;
-			if (rc & (1 << P1_AFE_PWD_LBN))
-				return 0;
-		}
-	}
-
-	EFX_INFO(efx, "timed out waiting for DSP boot\n");
-	rc = -ETIMEDOUT;
-fail_on:
-	sfe4001_poweroff(efx);
-	return rc;
-}
-
-static int sfn4111t_reset(struct efx_nic *efx)
-{
-	efx_oword_t reg;
-
-	/* GPIO 3 and the GPIO register are shared with I2C, so block that */
-	i2c_lock_adapter(&efx->i2c_adap);
-
-	/* Pull RST_N (GPIO 2) low then let it up again, setting the
-	 * FLASH_CFG_1 strap (GPIO 3) appropriately.  Only change the
-	 * output enables; the output levels should always be 0 (low)
-	 * and we rely on external pull-ups. */
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
-	msleep(1000);
-	EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
-	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
-			    !!(efx->phy_mode & PHY_MODE_SPECIAL));
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
-	msleep(1);
-
-	i2c_unlock_adapter(&efx->i2c_adap);
-
-	ssleep(1);
-	return 0;
-}
-
-static ssize_t show_phy_flash_cfg(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
-	return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
-}
-
-static ssize_t set_phy_flash_cfg(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
-	enum efx_phy_mode old_mode, new_mode;
-	int err;
-
-	rtnl_lock();
-	old_mode = efx->phy_mode;
-	if (count == 0 || *buf == '0')
-		new_mode = old_mode & ~PHY_MODE_SPECIAL;
-	else
-		new_mode = PHY_MODE_SPECIAL;
-	if (old_mode == new_mode) {
-		err = 0;
-	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
-		err = -EBUSY;
-	} else {
-		/* Reset the PHY, reconfigure the MAC and enable/disable
-		 * MAC stats accordingly. */
-		efx->phy_mode = new_mode;
-		if (new_mode & PHY_MODE_SPECIAL)
-			efx_stats_disable(efx);
-		if (efx->board_info.type == EFX_BOARD_SFE4001)
-			err = sfe4001_poweron(efx);
-		else
-			err = sfn4111t_reset(efx);
-		efx_reconfigure_port(efx);
-		if (!(new_mode & PHY_MODE_SPECIAL))
-			efx_stats_enable(efx);
-	}
-	rtnl_unlock();
-
-	return err ? err : count;
-}
-
-static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
-
-static void sfe4001_fini(struct efx_nic *efx)
-{
-	EFX_INFO(efx, "%s\n", __func__);
-
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	sfe4001_poweroff(efx);
-	i2c_unregister_device(efx->board_info.ioexp_client);
-	i2c_unregister_device(efx->board_info.hwmon_client);
-}
-
-static int sfe4001_check_hw(struct efx_nic *efx)
-{
-	s32 status;
-
-	/* If XAUI link is up then do not monitor */
-	if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
-		return 0;
-
-	/* Check the powered status of the PHY. Lack of power implies that
-	 * the MAX6647 has shut down power to it, probably due to a temp.
-	 * alarm. Reading the power status rather than the MAX6647 status
- * directly because the latter is read-to-clear and would thus
-	 * start to power up the PHY again when polled, causing us to blip
-	 * the power undesirably.
-	 * We know we can read from the IO expander because we did
-	 * it during power-on. Assume failure now is bad news. */
-	status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN);
-	if (status >= 0 &&
-	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
-		return 0;
-
-	/* Use board power control, not PHY power control */
-	sfe4001_poweroff(efx);
-	efx->phy_mode = PHY_MODE_OFF;
-
-	return (status < 0) ? -EIO : -ERANGE;
-}
-
-static struct i2c_board_info sfe4001_hwmon_info = {
-	I2C_BOARD_INFO("max6647", 0x4e),
-};
-
-/* This board uses an I2C expander to provide power to the PHY, which needs to
- * be turned on before the PHY can be used.
- * Context: Process context, rtnl lock held
- */
-int sfe4001_init(struct efx_nic *efx)
-{
-	int rc;
-
-#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
-	efx->board_info.hwmon_client =
-		i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
-#else
-	efx->board_info.hwmon_client =
-		i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr);
-#endif
-	if (!efx->board_info.hwmon_client)
-		return -EIO;
-
-	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
-	rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client,
-				       MAX664X_REG_WLHO, 90);
-	if (rc)
-		goto fail_hwmon;
-
-	efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
-	if (!efx->board_info.ioexp_client) {
-		rc = -EIO;
-		goto fail_hwmon;
-	}
-
-	/* 10Xpress has fixed-function LED pins, so there is no board-specific
-	 * blink code. */
-	efx->board_info.blink = tenxpress_phy_blink;
-
-	efx->board_info.monitor = sfe4001_check_hw;
-	efx->board_info.fini = sfe4001_fini;
-
-	if (efx->phy_mode & PHY_MODE_SPECIAL) {
-		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
-		 * will fail. */
-		efx_stats_disable(efx);
-	}
-	rc = sfe4001_poweron(efx);
-	if (rc)
-		goto fail_ioexp;
-
-	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	if (rc)
-		goto fail_on;
-
-	EFX_INFO(efx, "PHY is powered on\n");
-	return 0;
-
-fail_on:
-	sfe4001_poweroff(efx);
-fail_ioexp:
-	i2c_unregister_device(efx->board_info.ioexp_client);
-fail_hwmon:
-	i2c_unregister_device(efx->board_info.hwmon_client);
-	return rc;
-}
-
-static int sfn4111t_check_hw(struct efx_nic *efx)
-{
-	s32 status;
-
-	/* If XAUI link is up then do not monitor */
-	if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
-		return 0;
-
-	/* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
-	status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client,
-					  MAX664X_REG_RSL);
-	if (status < 0)
-		return -EIO;
-	if (status & 0x57)
-		return -ERANGE;
-	return 0;
-}
-
-static void sfn4111t_fini(struct efx_nic *efx)
-{
-	EFX_INFO(efx, "%s\n", __func__);
-
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	i2c_unregister_device(efx->board_info.hwmon_client);
-}
-
-static struct i2c_board_info sfn4111t_a0_hwmon_info = {
-	I2C_BOARD_INFO("max6647", 0x4e),
-};
-
-static struct i2c_board_info sfn4111t_r5_hwmon_info = {
-	I2C_BOARD_INFO("max6646", 0x4d),
-};
-
-int sfn4111t_init(struct efx_nic *efx)
-{
-	int i = 0;
-	int rc;
-
-	efx->board_info.hwmon_client =
-		i2c_new_device(&efx->i2c_adap,
-			       (efx->board_info.minor < 5) ?
-			       &sfn4111t_a0_hwmon_info :
-			       &sfn4111t_r5_hwmon_info);
-	if (!efx->board_info.hwmon_client)
-		return -EIO;
-
-	efx->board_info.blink = tenxpress_phy_blink;
-	efx->board_info.monitor = sfn4111t_check_hw;
-	efx->board_info.fini = sfn4111t_fini;
-
-	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	if (rc)
-		goto fail_hwmon;
-
-	do {
-		if (efx->phy_mode & PHY_MODE_SPECIAL) {
-			/* PHY may not generate a 156.25 MHz clock and MAC
-			 * stats fetch will fail. */
-			efx_stats_disable(efx);
-			sfn4111t_reset(efx);
-		}
-		rc = sft9001_wait_boot(efx);
-		if (rc == 0)
-			return 0;
-		efx->phy_mode = PHY_MODE_SPECIAL;
-	} while (rc == -EINVAL && ++i < 2);
-
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-fail_hwmon:
-	i2c_unregister_device(efx->board_info.hwmon_client);
-	return rc;
-}
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
new file mode 100644
index 000000000000..de07a4f031b2
--- /dev/null
+++ b/drivers/net/sfc/siena.c
@@ -0,0 +1,604 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "mac.h"
+#include "spi.h"
+#include "regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
+
+static void siena_init_wol(struct efx_nic *efx);
+
+
+static void siena_push_irq_moderation(struct efx_channel *channel)
+{
+	efx_dword_t timer_cmd;
+
+	if (channel->irq_moderation)
+		EFX_POPULATE_DWORD_2(timer_cmd,
+				     FRF_CZ_TC_TIMER_MODE,
+				     FFE_CZ_TIMER_MODE_INT_HLDOFF,
+				     FRF_CZ_TC_TIMER_VAL,
+				     channel->irq_moderation - 1);
+	else
+		EFX_POPULATE_DWORD_2(timer_cmd,
+				     FRF_CZ_TC_TIMER_MODE,
+				     FFE_CZ_TIMER_MODE_DIS,
+				     FRF_CZ_TC_TIMER_VAL, 0);
+	efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+			       channel->channel);
+}
+
+static void siena_push_multicast_hash(struct efx_nic *efx)
+{
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+	efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+		     efx->multicast_hash.byte, sizeof(efx->multicast_hash),
+		     NULL, 0, NULL);
+}
+
+static int siena_mdio_write(struct net_device *net_dev,
+			    int prtad, int devad, u16 addr, u16 value)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	uint32_t status;
+	int rc;
+
+	rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
+				 addr, value, &status);
+	if (rc)
+		return rc;
+	if (status != MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return 0;
+}
+
+static int siena_mdio_read(struct net_device *net_dev,
+			   int prtad, int devad, u16 addr)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	uint16_t value;
+	uint32_t status;
+	int rc;
+
+	rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
+				addr, &value, &status);
+	if (rc)
+		return rc;
+	if (status != MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return (int)value;
+}
+
+/* This call is responsible for hooking in the MAC and PHY operations */
+static int siena_probe_port(struct efx_nic *efx)
+{
+	int rc;
+
+	/* Hook in PHY operations table */
+	efx->phy_op = &efx_mcdi_phy_ops;
+
+	/* Set up MDIO structure for PHY */
+	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	efx->mdio.mdio_read = siena_mdio_read;
+	efx->mdio.mdio_write = siena_mdio_write;
+
+	/* Fill out MDIO structure and loopback modes */
+	rc = efx->phy_op->probe(efx);
+	if (rc != 0)
+		return rc;
+
+	/* Initial assumption */
+	efx->link_state.speed = 10000;
+	efx->link_state.fd = true;
+	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+
+	/* Allocate buffer for stats */
+	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+				  MC_CMD_MAC_NSTATS * sizeof(u64));
+	if (rc)
+		return rc;
+	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
+		(u64)efx->stats_buffer.dma_addr,
+		efx->stats_buffer.addr,
+		(u64)virt_to_phys(efx->stats_buffer.addr));
+
+	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+	return 0;
+}
+
+void siena_remove_port(struct efx_nic *efx)
+{
+	efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+static const struct efx_nic_register_test siena_register_tests[] = {
+	{ FR_AZ_ADR_REGION,
+	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+	{ FR_CZ_USR_EV_CFG,
+	  EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_AZ_RX_CFG,
+	  EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
+	{ FR_AZ_TX_CFG,
+	  EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
+	{ FR_AZ_TX_RESERVED,
+	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+	{ FR_AZ_SRM_TX_DC_CFG,
+	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_AZ_RX_DC_CFG,
+	  EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_AZ_RX_DC_PF_WM,
+	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_BZ_DP_CTRL,
+	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ FR_BZ_RX_RSS_TKEY,
+	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+	{ FR_CZ_RX_RSS_IPV6_REG1,
+	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+	{ FR_CZ_RX_RSS_IPV6_REG2,
+	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+	{ FR_CZ_RX_RSS_IPV6_REG3,
+	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
+};
+
+static int siena_test_registers(struct efx_nic *efx)
+{
+	return efx_nic_test_registers(efx, siena_register_tests,
+				      ARRAY_SIZE(siena_register_tests));
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+	if (method == RESET_TYPE_WORLD)
+		return efx_mcdi_reset_mc(efx);
+	else
+		return efx_mcdi_reset_port(efx);
+}
+
+static int siena_probe_nvconfig(struct efx_nic *efx)
+{
+	int rc;
+
+	rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int siena_probe_nic(struct efx_nic *efx)
+{
+	struct siena_nic_data *nic_data;
+	bool already_attached = false;
+	int rc;
+
+	/* Allocate storage for hardware specific data */
+	nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
+	if (!nic_data)
+		return -ENOMEM;
+	efx->nic_data = nic_data;
+
+	if (efx_nic_fpga_ver(efx) != 0) {
+		EFX_ERR(efx, "Siena FPGA not supported\n");
+		rc = -ENODEV;
+		goto fail1;
+	}
+
+	efx_mcdi_init(efx);
+
+	/* Recover from a failed assertion before probing */
+	rc = efx_mcdi_handle_assertion(efx);
+	if (rc)
+		goto fail1;
+
+	rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
+	if (rc) {
+		EFX_ERR(efx, "Failed to read MCPU firmware version - "
+			"rc %d\n", rc);
+		goto fail1; /* MCPU absent? */
+	}
+
+	/* Let the BMC know that the driver is now in charge of link and
+	 * filter settings. We must do this before we reset the NIC */
+	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+	if (rc) {
+		EFX_ERR(efx, "Unable to register driver with MCPU\n");
+		goto fail2;
+	}
+	if (already_attached)
+		/* Not a fatal error */
+		EFX_ERR(efx, "Host already registered with MCPU\n");
+
+	/* Now we can reset the NIC */
+	rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+	if (rc) {
+		EFX_ERR(efx, "failed to reset NIC\n");
+		goto fail3;
+	}
+
+	siena_init_wol(efx);
+
+	/* Allocate memory for INT_KER */
+	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+	if (rc)
+		goto fail4;
+	BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
+		(unsigned long long)efx->irq_status.dma_addr,
+		efx->irq_status.addr,
+		(unsigned long long)virt_to_phys(efx->irq_status.addr));
+
+	/* Read in the non-volatile configuration */
+	rc = siena_probe_nvconfig(efx);
+	if (rc == -EINVAL) {
+		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+		efx->phy_type = PHY_TYPE_NONE;
+		efx->mdio.prtad = MDIO_PRTAD_NONE;
+	} else if (rc) {
+		goto fail5;
+	}
+
+	return 0;
+
+fail5:
+	efx_nic_free_buffer(efx, &efx->irq_status);
+fail4:
+fail3:
+	efx_mcdi_drv_attach(efx, false, NULL);
+fail2:
+fail1:
+	kfree(efx->nic_data);
+	return rc;
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+static int siena_init_nic(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+	int rc;
+
+	/* Recover from a failed assertion post-reset */
+	rc = efx_mcdi_handle_assertion(efx);
+	if (rc)
+		return rc;
+
+	/* Squash TX of packets of 16 bytes or less */
+	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+	 * descriptors (which is bad).
+	 */
+	efx_reado(efx, &temp, FR_AZ_TX_CFG);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+	EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
+	efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+	efx_reado(efx, &temp, FR_AZ_RX_CFG);
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+
+	if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
+		/* No MCDI operation has been defined to set thresholds */
+		EFX_ERR(efx, "ignoring RX flow control thresholds\n");
+
+	/* Enable event logging */
+	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+	if (rc)
+		return rc;
+
+	/* Set destination of both TX and RX Flush events */
+	EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+	efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+
+	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
+	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
+
+	efx_nic_init_common(efx);
+	return 0;
+}
+
+static void siena_remove_nic(struct efx_nic *efx)
+{
+	efx_nic_free_buffer(efx, &efx->irq_status);
+
+	siena_reset_hw(efx, RESET_TYPE_ALL);
+
+	/* Relinquish the device back to the BMC */
+	if (efx_nic_has_mc(efx))
+		efx_mcdi_drv_attach(efx, false, NULL);
+
+	/* Tear down the private nic state */
+	kfree(efx->nic_data);
+	efx->nic_data = NULL;
+}
+
+#define STATS_GENERATION_INVALID ((u64)(-1))
+
+static int siena_try_update_nic_stats(struct efx_nic *efx)
+{
+	u64 *dma_stats;
+	struct efx_mac_stats *mac_stats;
+	u64 generation_start;
+	u64 generation_end;
+
+	mac_stats = &efx->mac_stats;
+	dma_stats = (u64 *)efx->stats_buffer.addr;
+
+	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	if (generation_end == STATS_GENERATION_INVALID)
+		return 0;
+	rmb();
+
+#define MAC_STAT(M, D) \
+	mac_stats->M = dma_stats[MC_CMD_MAC_ ## D]
+
+	MAC_STAT(tx_bytes, TX_BYTES);
+	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
+	mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
+				    mac_stats->tx_bad_bytes);
+	MAC_STAT(tx_packets, TX_PKTS);
+	MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
+	MAC_STAT(tx_pause, TX_PAUSE_PKTS);
+	MAC_STAT(tx_control, TX_CONTROL_PKTS);
+	MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
+	MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
+	MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
+	MAC_STAT(tx_lt64, TX_LT64_PKTS);
+	MAC_STAT(tx_64, TX_64_PKTS);
+	MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
+	MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
+	MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
+	MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
+	MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
+	MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
+	MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
+	mac_stats->tx_collision = 0;
+	MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
+	MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
+	MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
+	MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
+	MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
+	mac_stats->tx_collision = (mac_stats->tx_single_collision +
+				   mac_stats->tx_multiple_collision +
+				   mac_stats->tx_excessive_collision +
+				   mac_stats->tx_late_collision);
+	MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
+	MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
+	MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
+	MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
+	MAC_STAT(rx_bytes, RX_BYTES);
+	MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
+	mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
+				    mac_stats->rx_bad_bytes);
+	MAC_STAT(rx_packets, RX_PKTS);
+	MAC_STAT(rx_good, RX_GOOD_PKTS);
+	mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good;
+	MAC_STAT(rx_pause, RX_PAUSE_PKTS);
+	MAC_STAT(rx_control, RX_CONTROL_PKTS);
+	MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
+	MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
+	MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
+	MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
+	MAC_STAT(rx_64, RX_64_PKTS);
+	MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
+	MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
+	MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
+	MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
+	MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
+	MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
+	MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
+	mac_stats->rx_bad_lt64 = 0;
+	mac_stats->rx_bad_64_to_15xx = 0;
+	mac_stats->rx_bad_15xx_to_jumbo = 0;
+	MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
+	MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
+	mac_stats->rx_missed = 0;
+	MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
+	MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
+	MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
+	MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
+	MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
+	mac_stats->rx_good_lt64 = 0;
+
+	efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS];
+
+#undef MAC_STAT
+
+	rmb();
+	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+	if (generation_end != generation_start)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static void siena_update_nic_stats(struct efx_nic *efx)
+{
+	while (siena_try_update_nic_stats(efx) == -EAGAIN)
+		cpu_relax();
+}
+
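The MC DMAs a statistics snapshot into host memory and brackets it with generation counters, so the host uses a seqcount-style double read: sample GENERATION_END, read the counters between two rmb() barriers, then confirm GENERATION_START matches and retry otherwise. A compact illustration of the same discipline on a hypothetical buffer layout (slot 0 holding the start marker, the last slot the end marker):

	/* Illustrative only: generation-checked read of a DMA'd counter
	 * block, mirroring the check in siena_try_update_nic_stats() above.
	 * The slot layout is hypothetical. */
	static int read_generation_checked(const u64 *dma_stats, u64 *out,
					   unsigned int n_counters,
					   unsigned int end_slot)
	{
		u64 generation_end = dma_stats[end_slot];
		unsigned int i;

		if (generation_end == STATS_GENERATION_INVALID)
			return 0;	/* MC has not produced a snapshot yet */
		rmb();			/* order the end marker before the data */
		for (i = 0; i < n_counters; i++)
			out[i] = dma_stats[1 + i];
		rmb();			/* order the data before the start marker */
		return dma_stats[0] == generation_end ? 0 : -EAGAIN;
	}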
+static void siena_start_nic_stats(struct efx_nic *efx)
+{
+	u64 *dma_stats = (u64 *)efx->stats_buffer.addr;
+
+	dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+
+	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+			   MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
+
+static void siena_stop_nic_stats(struct efx_nic *efx)
+{
+	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+	snprintf(buf, len, "%u.%u.%u.%u",
+		 (unsigned int)(nic_data->fw_version >> 48),
+		 (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
+		 (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
+		 (unsigned int)(nic_data->fw_version & 0xffff));
+}
+
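The MCPU firmware version packs four 16-bit fields into one 64-bit word, most significant field first. A worked example with a hypothetical value:

	/* fw_version = 0x0001000200030004ULL
	 *   fw_version >> 48           == 0x0001 -> "1"
	 *   fw_version >> 32 & 0xffff  == 0x0002 -> "2"
	 *   fw_version >> 16 & 0xffff  == 0x0003 -> "3"
	 *   fw_version       & 0xffff  == 0x0004 -> "4"
	 * so siena_print_fwver() formats it as "1.2.3.4".
	 */

Note that the shift binds tighter than the mask, so the unparenthesised expressions in the code above read as (fw_version >> 32) & 0xffff and so on.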
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+
+	wol->supported = WAKE_MAGIC;
+	if (nic_data->wol_filter_id != -1)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+
+static int siena_set_wol(struct efx_nic *efx, u32 type)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	if (type & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (type & WAKE_MAGIC) {
+		if (nic_data->wol_filter_id != -1)
+			efx_mcdi_wol_filter_remove(efx,
+						   nic_data->wol_filter_id);
+		rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address,
+						   &nic_data->wol_filter_id);
+		if (rc)
+			goto fail;
+
+		pci_wake_from_d3(efx->pci_dev, true);
+	} else {
+		rc = efx_mcdi_wol_filter_reset(efx);
+		nic_data->wol_filter_id = -1;
+		pci_wake_from_d3(efx->pci_dev, false);
+		if (rc)
+			goto fail;
+	}
+
+	return 0;
+ fail:
+	EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc);
+	return rc;
+}
+
+
+static void siena_init_wol(struct efx_nic *efx)
+{
+	struct siena_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
+
+	if (rc != 0) {
+		/* If it failed, attempt to get into a synchronised
+		 * state with MC by resetting any set WoL filters */
+		efx_mcdi_wol_filter_reset(efx);
+		nic_data->wol_filter_id = -1;
+	} else if (nic_data->wol_filter_id != -1) {
+		pci_wake_from_d3(efx->pci_dev, true);
+	}
+}
+
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c and nic.c
+ *
+ **************************************************************************
+ */
+
+struct efx_nic_type siena_a0_nic_type = {
+	.probe = siena_probe_nic,
+	.remove = siena_remove_nic,
+	.init = siena_init_nic,
+	.fini = efx_port_dummy_op_void,
+	.monitor = NULL,
+	.reset = siena_reset_hw,
+	.probe_port = siena_probe_port,
+	.remove_port = siena_remove_port,
+	.prepare_flush = efx_port_dummy_op_void,
+	.update_stats = siena_update_nic_stats,
+	.start_stats = siena_start_nic_stats,
+	.stop_stats = siena_stop_nic_stats,
+	.set_id_led = efx_mcdi_set_id_led,
+	.push_irq_moderation = siena_push_irq_moderation,
+	.push_multicast_hash = siena_push_multicast_hash,
+	.reconfigure_port = efx_mcdi_phy_reconfigure,
+	.get_wol = siena_get_wol,
+	.set_wol = siena_set_wol,
+	.resume_wol = siena_init_wol,
+	.test_registers = siena_test_registers,
+	.default_mac_ops = &efx_mcdi_mac_operations,
+
+	.revision = EFX_REV_SIENA_A0,
+	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
+			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
+	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+	.rx_buffer_padding = 0,
+	.max_interrupt_mode = EFX_INT_MODE_MSIX,
+	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
+				   * interrupt handler only supports 32
+				   * channels */
+	.tx_dc_base = 0x88000,
+	.rx_dc_base = 0x68000,
+	.offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM,
+	.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
+};
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 1b1ceb411671..8bf4fce0813a 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -36,8 +36,6 @@
 
 /**
  * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
- * @efx:		The Efx controller that owns this device
- * @mtd:		MTD state
  * @device_id:		Controller's id for the device
  * @size:		Size (in bytes)
  * @addr_len:		Number of address bytes in read/write commands
@@ -54,10 +52,6 @@
  *	Write commands are limited to blocks with this size and alignment.
  */
 struct efx_spi_device {
-	struct efx_nic *efx;
-#ifdef CONFIG_SFC_MTD
-	void *mtd;
-#endif
 	int device_id;
 	unsigned int size;
 	unsigned int addr_len;
@@ -67,12 +61,16 @@ struct efx_spi_device {
 	unsigned int block_size;
 };
 
-int falcon_spi_cmd(const struct efx_spi_device *spi, unsigned int command,
+int falcon_spi_cmd(struct efx_nic *efx,
+		   const struct efx_spi_device *spi, unsigned int command,
 		   int address, const void* in, void *out, size_t len);
-int falcon_spi_wait_write(const struct efx_spi_device *spi);
-int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
+int falcon_spi_wait_write(struct efx_nic *efx,
+			  const struct efx_spi_device *spi);
+int falcon_spi_read(struct efx_nic *efx,
+		    const struct efx_spi_device *spi, loff_t start,
 		    size_t len, size_t *retlen, u8 *buffer);
-int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
+int falcon_spi_write(struct efx_nic *efx,
+		     const struct efx_spi_device *spi, loff_t start,
 		     size_t len, size_t *retlen, const u8 *buffer);
 
 /*
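The SPI helpers now take the owning struct efx_nic explicitly rather than reaching it through a back-pointer stored in efx_spi_device. A hedged sketch of a caller using the reworked falcon_spi_read() signature (the wrapper name and offset are made up for illustration; any locking the real driver requires is not shown):

	/* Hypothetical caller of the new falcon_spi_read() signature. */
	static int example_spi_read(struct efx_nic *efx,
				    const struct efx_spi_device *spi,
				    u8 *buf, size_t len)
	{
		size_t retlen = 0;
		int rc;

		rc = falcon_spi_read(efx, spi, 0 /* start offset */, len,
				     &retlen, buf);
		if (rc)
			return rc;
		return (retlen == len) ? 0 : -EIO;
	}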
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f4d509015f75..ca11572a49a9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
+ * Copyright 2007-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -12,10 +12,9 @@
 #include <linux/seq_file.h>
 #include "efx.h"
 #include "mdio_10g.h"
-#include "falcon.h"
+#include "nic.h"
 #include "phy.h"
-#include "falcon_hwdefs.h"
-#include "boards.h"
+#include "regs.h"
 #include "workarounds.h"
 #include "selftest.h"
 
@@ -31,13 +30,13 @@
 #define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) |	\
 			   (1 << LOOPBACK_PCS) |	\
 			   (1 << LOOPBACK_PMAPMD) |	\
-			   (1 << LOOPBACK_NETWORK))
+			   (1 << LOOPBACK_PHYXS_WS))
 
 #define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) |	\
 			   (1 << LOOPBACK_PHYXS) |	\
 			   (1 << LOOPBACK_PCS) |	\
 			   (1 << LOOPBACK_PMAPMD) |	\
-			   (1 << LOOPBACK_NETWORK))
+			   (1 << LOOPBACK_PHYXS_WS))
 
 /* We complain if we fail to see the link partner as 10G capable this many
  * times in a row (must be > 1 as sampling the autoneg. registers is racy)
@@ -84,9 +83,9 @@
 #define PMA_PMD_LED_FLASH	(3)
 #define PMA_PMD_LED_MASK	3
 /* All LEDs under hardware control */
-#define PMA_PMD_LED_FULL_AUTO	(0)
+#define SFT9001_PMA_PMD_LED_DEFAULT 0
 /* Green and Amber under hardware control, Red off */
-#define PMA_PMD_LED_DEFAULT	(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
+#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
 
 #define PMA_PMD_SPEED_ENABLE_REG 49192
 #define PMA_PMD_100TX_ADV_LBN    1
@@ -200,15 +199,16 @@ static ssize_t set_phy_short_reach(struct device *dev,
 				   const char *buf, size_t count)
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	int rc;
 
 	rtnl_lock();
 	efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
 			  MDIO_PMA_10GBT_TXPWR_SHORT,
 			  count != 0 && *buf != '0');
-	efx_reconfigure_port(efx);
+	rc = efx_reconfigure_port(efx);
 	rtnl_unlock();
 
-	return count;
+	return rc < 0 ? rc : (ssize_t)count;
 }
 
 static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
@@ -292,17 +292,36 @@ static int tenxpress_init(struct efx_nic *efx)
 		efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
 				  1 << PMA_PMA_LED_ACTIVITY_LBN, true);
 		efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
-			       PMA_PMD_LED_DEFAULT);
+			       SFX7101_PMA_PMD_LED_DEFAULT);
 	}
 
 	return 0;
 }
 
+static int sfx7101_phy_probe(struct efx_nic *efx)
+{
+	efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+	return 0;
+}
+
+static int sft9001_phy_probe(struct efx_nic *efx)
+{
+	efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
+			       FALCON_GMAC_LOOPBACKS);
+	return 0;
+}
+
 static int tenxpress_phy_init(struct efx_nic *efx)
 {
 	struct tenxpress_phy_data *phy_data;
 	int rc = 0;
 
+	falcon_board(efx)->type->init_phy(efx);
+
 	phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
 	if (!phy_data)
 		return -ENOMEM;
@@ -333,6 +352,15 @@ static int tenxpress_phy_init(struct efx_nic *efx)
 	if (rc < 0)
 		goto fail;
 
+	/* Initialise advertising flags */
+	efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+				  ADVERTISED_10000baseT_Full);
+	if (efx->phy_type != PHY_TYPE_SFX7101)
+		efx->link_advertising |= (ADVERTISED_1000baseT_Full |
+					   ADVERTISED_100baseT_Full);
+	efx_link_set_wanted_fc(efx, efx->wanted_fc);
+	efx_mdio_an_reconfigure(efx);
+
 	if (efx->phy_type == PHY_TYPE_SFT9001B) {
 		rc = device_create_file(&efx->pci_dev->dev,
 					&dev_attr_phy_short_reach);
@@ -363,7 +391,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
 	/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
 	 * a special software reset can glitch the XGMAC sufficiently for stats
 	 * requests to fail. */
-	efx_stats_disable(efx);
+	falcon_stop_nic_stats(efx);
 
 	/* Initiate reset */
 	reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
@@ -385,7 +413,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
 	/* Wait for the XGXS state machine to churn */
 	mdelay(10);
 out:
-	efx_stats_enable(efx);
+	falcon_start_nic_stats(efx);
 	return rc;
 }
 
@@ -489,95 +517,76 @@ static void tenxpress_low_power(struct efx_nic *efx)
 			!!(efx->phy_mode & PHY_MODE_LOW_POWER));
 }
 
-static void tenxpress_phy_reconfigure(struct efx_nic *efx)
+static int tenxpress_phy_reconfigure(struct efx_nic *efx)
 {
 	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	struct ethtool_cmd ecmd;
 	bool phy_mode_change, loop_reset;
 
 	if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
 		phy_data->phy_mode = efx->phy_mode;
-		return;
+		return 0;
 	}
 
-	tenxpress_low_power(efx);
-
 	phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
 			   phy_data->phy_mode != PHY_MODE_NORMAL);
-	loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
+	loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
 		      LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
 
 	if (loop_reset || phy_mode_change) {
-		int rc;
-
-		efx->phy_op->get_settings(efx, &ecmd);
-
-		if (loop_reset || phy_mode_change) {
-			tenxpress_special_reset(efx);
-
-			/* Reset XAUI if we were in 10G, and are staying
-			 * in 10G. If we're moving into and out of 10G
-			 * then xaui will be reset anyway */
-			if (EFX_IS10G(efx))
-				falcon_reset_xaui(efx);
-		}
+		tenxpress_special_reset(efx);
 
-		rc = efx->phy_op->set_settings(efx, &ecmd);
-		WARN_ON(rc);
+		/* Reset XAUI if we were in 10G, and are staying
+		 * in 10G. If we're moving into and out of 10G
+		 * then xaui will be reset anyway */
+		if (EFX_IS10G(efx))
+			falcon_reset_xaui(efx);
 	}
 
+	tenxpress_low_power(efx);
 	efx_mdio_transmit_disable(efx);
 	efx_mdio_phy_reconfigure(efx);
 	tenxpress_ext_loopback(efx);
+	efx_mdio_an_reconfigure(efx);
 
 	phy_data->loopback_mode = efx->loopback_mode;
 	phy_data->phy_mode = efx->phy_mode;
 
-	if (efx->phy_type == PHY_TYPE_SFX7101) {
-		efx->link_speed = 10000;
-		efx->link_fd = true;
-		efx->link_up = sfx7101_link_ok(efx);
-	} else {
-		efx->phy_op->get_settings(efx, &ecmd);
-		efx->link_speed = ecmd.speed;
-		efx->link_fd = ecmd.duplex == DUPLEX_FULL;
-		efx->link_up = sft9001_link_ok(efx, &ecmd);
-	}
-	efx->link_fc = efx_mdio_get_pause(efx);
+	return 0;
 }
 
-/* Poll PHY for interrupt */
-static void tenxpress_phy_poll(struct efx_nic *efx)
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+
+/* Poll for link state changes */
+static bool tenxpress_phy_poll(struct efx_nic *efx)
 {
-	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	bool change = false;
+	struct efx_link_state old_state = efx->link_state;
 
 	if (efx->phy_type == PHY_TYPE_SFX7101) {
-		bool link_ok = sfx7101_link_ok(efx);
-		if (link_ok != efx->link_up) {
-			change = true;
-		} else {
-			unsigned int link_fc = efx_mdio_get_pause(efx);
-			if (link_fc != efx->link_fc)
-				change = true;
-		}
-		sfx7101_check_bad_lp(efx, link_ok);
-	} else if (efx->loopback_mode) {
-		bool link_ok = sft9001_link_ok(efx, NULL);
-		if (link_ok != efx->link_up)
-			change = true;
+		efx->link_state.up = sfx7101_link_ok(efx);
+		efx->link_state.speed = 10000;
+		efx->link_state.fd = true;
+		efx->link_state.fc = efx_mdio_get_pause(efx);
+
+		sfx7101_check_bad_lp(efx, efx->link_state.up);
 	} else {
-		int status = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
-					   MDIO_PMA_LASI_STAT);
-		if (status & MDIO_PMA_LASI_LSALARM)
-			change = true;
-	}
+		struct ethtool_cmd ecmd;
 
-	if (change)
-		falcon_sim_phy_event(efx);
+		/* Check the LASI alarm first */
+		if (efx->loopback_mode == LOOPBACK_NONE &&
+		    !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
+		      MDIO_PMA_LASI_LSALARM))
+			return false;
 
-	if (phy_data->phy_mode != PHY_MODE_NORMAL)
-		return;
+		tenxpress_get_settings(efx, &ecmd);
+
+		efx->link_state.up = sft9001_link_ok(efx, &ecmd);
+		efx->link_state.speed = ecmd.speed;
+		efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
+		efx->link_state.fc = efx_mdio_get_pause(efx);
+	}
+
+	return !efx_link_state_equal(&efx->link_state, &old_state);
 }
 
 static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -604,18 +613,29 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
 }
 
 
-/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
- * (which probably aren't wired anyway) are left in AUTO mode */
-void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
+/* Override the RX, TX and link LEDs */
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
 {
 	int reg;
 
-	if (blink)
-		reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
-			(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
-			(PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);
-	else
-		reg = PMA_PMD_LED_DEFAULT;
+	switch (mode) {
+	case EFX_LED_OFF:
+		reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
+			(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
+			(PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
+		break;
+	case EFX_LED_ON:
+		reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
+			(PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
+			(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
+		break;
+	default:
+		if (efx->phy_type == PHY_TYPE_SFX7101)
+			reg = SFX7101_PMA_PMD_LED_DEFAULT;
+		else
+			reg = SFT9001_PMA_PMD_LED_DEFAULT;
+		break;
+	}
 
 	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
 }
@@ -624,6 +644,13 @@ static const char *const sfx7101_test_names[] = {
 	"bist"
 };
 
+static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
+{
+	if (index < ARRAY_SIZE(sfx7101_test_names))
+		return sfx7101_test_names[index];
+	return NULL;
+}
+
 static int
 sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
 {
@@ -635,6 +662,9 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
 	/* BIST is automatically run after a special software reset */
 	rc = tenxpress_special_reset(efx);
 	results[0] = rc ? -1 : 1;
+
+	efx_mdio_an_reconfigure(efx);
+
 	return rc;
 }
 
@@ -650,14 +680,17 @@ static const char *const sft9001_test_names[] = {
 	"cable.pairD.length",
 };
 
+static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index)
+{
+	if (index < ARRAY_SIZE(sft9001_test_names))
+		return sft9001_test_names[index];
+	return NULL;
+}
+
 static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
 {
-	struct ethtool_cmd ecmd;
 	int rc = 0, rc2, i, ctrl_reg, res_reg;
 
-	if (flags & ETH_TEST_FL_OFFLINE)
-		efx->phy_op->get_settings(efx, &ecmd);
-
 	/* Initialise cable diagnostic results to unknown failure */
 	for (i = 1; i < 9; ++i)
 		results[i] = -1;
@@ -709,9 +742,7 @@ out:
 		if (!rc)
 			rc = rc2;
 
-		rc2 = efx->phy_op->set_settings(efx, &ecmd);
-		if (!rc)
-			rc = rc2;
+		efx_mdio_an_reconfigure(efx);
 	}
 
 	return rc;
@@ -758,7 +789,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 	 * but doesn't advertise the correct speed. So override it */
 	if (efx->loopback_mode == LOOPBACK_GPHY)
 		ecmd->speed = SPEED_1000;
-	else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
+	else if (LOOPBACK_EXTERNAL(efx))
 		ecmd->speed = SPEED_10000;
 }
 
@@ -788,35 +819,27 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
 }
 
 struct efx_phy_operations falcon_sfx7101_phy_ops = {
-	.macs		  = EFX_XMAC,
+	.probe		  = sfx7101_phy_probe,
 	.init             = tenxpress_phy_init,
 	.reconfigure      = tenxpress_phy_reconfigure,
 	.poll             = tenxpress_phy_poll,
 	.fini             = tenxpress_phy_fini,
-	.clear_interrupt  = efx_port_dummy_op_void,
 	.get_settings	  = tenxpress_get_settings,
 	.set_settings	  = tenxpress_set_settings,
 	.set_npage_adv    = sfx7101_set_npage_adv,
-	.num_tests	  = ARRAY_SIZE(sfx7101_test_names),
-	.test_names	  = sfx7101_test_names,
+	.test_name	  = sfx7101_test_name,
 	.run_tests	  = sfx7101_run_tests,
-	.mmds             = TENXPRESS_REQUIRED_DEVS,
-	.loopbacks        = SFX7101_LOOPBACKS,
 };
 
 struct efx_phy_operations falcon_sft9001_phy_ops = {
-	.macs		  = EFX_GMAC | EFX_XMAC,
+	.probe		  = sft9001_phy_probe,
 	.init             = tenxpress_phy_init,
 	.reconfigure      = tenxpress_phy_reconfigure,
 	.poll             = tenxpress_phy_poll,
 	.fini             = tenxpress_phy_fini,
-	.clear_interrupt  = efx_port_dummy_op_void,
 	.get_settings	  = tenxpress_get_settings,
 	.set_settings	  = tenxpress_set_settings,
 	.set_npage_adv    = sft9001_set_npage_adv,
-	.num_tests	  = ARRAY_SIZE(sft9001_test_names),
-	.test_names	  = sft9001_test_names,
+	.test_name	  = sft9001_test_name,
 	.run_tests	  = sft9001_run_tests,
-	.mmds             = TENXPRESS_REQUIRED_DEVS,
-	.loopbacks        = SFT9001_LOOPBACKS,
 };
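
The ops-table changes above drop the static num_tests/test_names fields in favour of a per-PHY test_name(efx, index) callback that returns NULL once the index runs off the end of the table, presumably so a caller can enumerate names without knowing the count up front. A minimal standalone sketch of that enumeration pattern, with stand-in names rather than the real sfc types:

#include <stdio.h>

/* Illustrative stand-in for the per-PHY operations table. */
struct phy_ops {
	const char *(*test_name)(void *ctx, unsigned int index);
};

static const char *const demo_test_names[] = {
	"bist",
	"cable.pairA.status",
};

static const char *demo_test_name(void *ctx, unsigned int index)
{
	(void)ctx;
	if (index < sizeof(demo_test_names) / sizeof(demo_test_names[0]))
		return demo_test_names[index];
	return NULL;	/* past the end, as in sfx7101_test_name() */
}

int main(void)
{
	struct phy_ops ops = { .test_name = demo_test_name };
	const char *name;
	unsigned int i;

	/* Walk the table until the callback reports the end. */
	for (i = 0; (name = ops.test_name(NULL, i)) != NULL; i++)
		printf("test %u: %s\n", i, name);
	return 0;
}
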
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..e669f94e821b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2008 Solarflare Communications Inc.
+ * Copyright 2005-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -12,12 +12,13 @@
 #include <linux/tcp.h>
 #include <linux/ip.h>
 #include <linux/in.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
 #include <linux/if_ether.h>
 #include <linux/highmem.h>
 #include "net_driver.h"
-#include "tx.h"
 #include "efx.h"
-#include "falcon.h"
+#include "nic.h"
 #include "workarounds.h"
 
 /*
@@ -26,8 +27,7 @@
  * The tx_queue descriptor ring fill-level must fall below this value
  * before we restart the netif queue
  */
-#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)	\
-	(_tx_queue->efx->type->txd_ring_mask / 2u)
+#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
 
 /* We want to be able to nest calls to netif_stop_queue(), since each
  * channel can have an individual stop on the queue.
@@ -125,6 +125,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
 }
 
 
+static inline unsigned
+efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+{
+	/* Depending on the NIC revision, we can use descriptor
+	 * lengths up to 8K or 8K-1.  However, since PCI Express
+	 * devices must split read requests at 4K boundaries, there is
+	 * little benefit from using descriptors that cross those
+	 * boundaries and we keep things simple by not doing so.
+	 */
+	unsigned len = (~dma_addr & 0xfff) + 1;
+
+	/* Work around hardware bug for unaligned buffers. */
+	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
+		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
+
+	return len;
+}
+
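
The new efx_max_tx_len() helper above keeps each TX descriptor from crossing the next 4 KB boundary (PCI Express read requests are split there anyway, per the comment) and, when workaround 5391 applies, clamps unaligned buffers so a descriptor never extends more than 512 bytes past its 16-byte-aligned base. A standalone sketch of the same arithmetic, outside the driver:

#include <stdio.h>
#include <stdint.h>

/* Bytes available before the next 4 KB boundary is crossed. */
static unsigned int max_len_to_4k(uint64_t dma_addr)
{
	return (unsigned int)((~dma_addr & 0xfff) + 1);
}

int main(void)
{
	uint64_t addrs[] = { 0x1000, 0x1ffe, 0x2abc };

	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		uint64_t a = addrs[i];
		unsigned int len = max_len_to_4k(a);

		/* e.g. 0x1ffe leaves only 2 bytes before 0x2000 */
		printf("addr 0x%llx: %u bytes to the 4K boundary\n",
		       (unsigned long long)a, len);

		/* 5391-style clamp: an unaligned descriptor must not run
		 * more than 512 bytes past its 16-byte-aligned base. */
		if ((a & 0xf) && len > 512 - (a & 0xf))
			len = 512 - (unsigned int)(a & 0xf);
		printf("  after the unaligned clamp: %u bytes\n", len);
	}
	return 0;
}
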
 /*
  * Add a socket buffer to a TX queue
  *
@@ -135,11 +153,13 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
  * If any DMA mapping fails, any mapped fragments will be unmapped,
  * the queue's insert pointer will be restored to its original value.
  *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
  * You must hold netif_tx_lock() to call this function.
  */
-static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
-					 struct sk_buff *skb)
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	struct pci_dev *pci_dev = efx->pci_dev;
@@ -147,7 +167,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	skb_frag_t *fragment;
 	struct page *page;
 	int page_offset;
-	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
+	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	bool unmap_single;
@@ -156,7 +176,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
-	if (skb_shinfo((struct sk_buff *)skb)->gso_size)
+	if (skb_shinfo(skb)->gso_size)
 		return efx_enqueue_skb_tso(tx_queue, skb);
 
 	/* Get size of the initial fragment */
@@ -171,7 +191,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	}
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	/* Map for DMA.  Use pci_map_single rather than pci_map_page
 	 * since this is more efficient on machines with sparse
@@ -208,16 +228,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 					&tx_queue->read_count;
 				fill_level = (tx_queue->insert_count
 					      - tx_queue->old_read_count);
-				q_space = (efx->type->txd_ring_mask - 1 -
-					   fill_level);
+				q_space = EFX_TXQ_MASK - 1 - fill_level;
 				if (unlikely(q_space-- <= 0))
 					goto stop;
 				smp_mb();
 				--tx_queue->stopped;
 			}
 
-			insert_ptr = (tx_queue->insert_count &
-				      efx->type->txd_ring_mask);
+			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 			buffer = &tx_queue->buffer[insert_ptr];
 			efx_tsoh_free(tx_queue, buffer);
 			EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -226,14 +244,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 			EFX_BUG_ON_PARANOID(!buffer->continuation);
 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
-			if (likely(dma_len > len))
+			dma_len = efx_max_tx_len(efx, dma_addr);
+			if (likely(dma_len >= len))
 				dma_len = len;
 
-			misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
-			if (misalign && dma_len + misalign > 512)
-				dma_len = 512 - misalign;
-
 			/* Fill out per descriptor fields */
 			buffer->len = dma_len;
 			buffer->dma_addr = dma_addr;
@@ -266,7 +280,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	buffer->continuation = false;
 
 	/* Pass off to hardware */
-	falcon_push_buffers(tx_queue);
+	efx_nic_push_buffers(tx_queue);
 
 	return NETDEV_TX_OK;
 
@@ -276,7 +290,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 		   skb_shinfo(skb)->nr_frags + 1);
 
 	/* Mark the packet as transmitted, and free the SKB ourselves */
-	dev_kfree_skb_any((struct sk_buff *)skb);
+	dev_kfree_skb_any(skb);
 	goto unwind;
 
  stop:
@@ -289,7 +303,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->len = 0;
@@ -318,10 +332,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
-	unsigned int mask = tx_queue->efx->type->txd_ring_mask;
 
-	stop_index = (index + 1) & mask;
-	read_ptr = tx_queue->read_count & mask;
+	stop_index = (index + 1) & EFX_TXQ_MASK;
+	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,28 +351,10 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 		buffer->len = 0;
 
 		++tx_queue->read_count;
-		read_ptr = tx_queue->read_count & mask;
+		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 	}
 }
 
-/* Initiate a packet transmission on the specified TX queue.
- * Note that returning anything other than NETDEV_TX_OK will cause the
- * OS to free the skb.
- *
- * This function is split out from efx_hard_start_xmit to allow the
- * loopback test to direct packets via specific TX queues.  It is
- * therefore a non-static inline, so as not to penalise performance
- * for non-loopback transmissions.
- *
- * Context: netif_tx_lock held
- */
-inline netdev_tx_t efx_xmit(struct efx_nic *efx,
-			   struct efx_tx_queue *tx_queue, struct sk_buff *skb)
-{
-	/* Map fragments for DMA and add to TX queue */
-	return efx_enqueue_skb(tx_queue, skb);
-}
-
 /* Initiate a packet transmission.  We use one channel per CPU
  * (sharing when we have more CPUs than channels).  On Falcon, the TX
  * completion events will be directed back to the CPU that transmitted
@@ -383,7 +378,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	else
 		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
 
-	return efx_xmit(efx, tx_queue, skb);
+	return efx_enqueue_skb(tx_queue, skb);
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -391,7 +386,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
 
-	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
+	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
 
 	efx_dequeue_buffers(tx_queue, index);
 
@@ -401,7 +396,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	smp_mb();
 	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
+		if (fill_level < EFX_TXQ_THRESHOLD) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
@@ -425,15 +420,15 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
 
 	/* Allocate software ring */
-	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
+	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
 	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
+	for (i = 0; i <= EFX_TXQ_MASK; ++i)
 		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
-	rc = falcon_probe_tx(tx_queue);
+	rc = efx_nic_probe_tx(tx_queue);
 	if (rc)
 		goto fail;
 
@@ -456,7 +451,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
-	falcon_init_tx(tx_queue);
+	efx_nic_init_tx(tx_queue);
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -468,8 +463,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
-		buffer = &tx_queue->buffer[tx_queue->read_count &
-					   tx_queue->efx->type->txd_ring_mask];
+		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->continuation = true;
 		buffer->len = 0;
@@ -483,7 +477,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
 
 	/* Flush TX queue, remove descriptor ring */
-	falcon_fini_tx(tx_queue);
+	efx_nic_fini_tx(tx_queue);
 
 	efx_release_tx_buffers(tx_queue);
 
@@ -500,7 +494,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
-	falcon_remove_tx(tx_queue);
+	efx_nic_remove_tx(tx_queue);
 
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
@@ -539,6 +533,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 #define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
 #define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
 #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
+#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
 
 /**
  * struct tso_state - TSO state for an SKB
@@ -551,6 +546,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @unmap_len: Length of SKB fragment
  * @unmap_addr: DMA address of SKB fragment
  * @unmap_single: DMA single vs page mapping flag
+ * @protocol: Network protocol (after any VLAN header)
  * @header_len: Number of bytes of header
  * @full_packet_size: Number of bytes to put in each outgoing segment
  *
@@ -571,6 +567,7 @@ struct tso_state {
 	dma_addr_t unmap_addr;
 	bool unmap_single;
 
+	__be16 protocol;
 	unsigned header_len;
 	int full_packet_size;
 };
@@ -578,9 +575,9 @@ struct tso_state {
 
 /*
  * Verify that our various assumptions about sk_buffs and the conditions
- * under which TSO will be attempted hold true.
+ * under which TSO will be attempted hold true.  Return the protocol number.
  */
-static void efx_tso_check_safe(struct sk_buff *skb)
+static __be16 efx_tso_check_protocol(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
 
@@ -595,13 +592,22 @@ static void efx_tso_check_safe(struct sk_buff *skb)
 		if (protocol == htons(ETH_P_IP))
 			skb_set_transport_header(skb, sizeof(*veh) +
 						 4 * ip_hdr(skb)->ihl);
+		else if (protocol == htons(ETH_P_IPV6))
+			skb_set_transport_header(skb, sizeof(*veh) +
+						 sizeof(struct ipv6hdr));
 	}
 
-	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
-	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+	if (protocol == htons(ETH_P_IP)) {
+		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+	} else {
+		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
+		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
+	}
 	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
 			     + (tcp_hdr(skb)->doff << 2u)) >
 			    skb_headlen(skb));
+
+	return protocol;
 }
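
efx_tso_check_protocol() above generalises the old IPv4-only sanity check so the TSO path also handles IPv6 TCP, stepping over a single VLAN header where present. The underlying classification is plain offset arithmetic on the frame; a self-contained userspace sketch of the same idea (raw byte offsets, no skb, names invented here):

#include <stdio.h>
#include <stdint.h>

#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define ETH_P_8021Q 0x8100

/* Return the byte offset of the TCP header, or -1 if the frame is not
 * IPv4 or IPv6 TCP.  'len' guards against reading past the buffer. */
static int tcp_header_offset(const uint8_t *frame, int len)
{
	int off = 14;				/* Ethernet header */
	unsigned int proto;

	if (len < off)
		return -1;
	proto = (frame[12] << 8) | frame[13];
	if (proto == ETH_P_8021Q) {		/* one 802.1Q tag */
		off += 4;
		if (len < off)
			return -1;
		proto = (frame[16] << 8) | frame[17];
	}
	if (proto == ETH_P_IP) {
		if (len < off + 20 || frame[off + 9] != 6)	/* TCP */
			return -1;
		return off + 4 * (frame[off] & 0x0f);	/* IHL in words */
	}
	if (proto == ETH_P_IPV6) {
		if (len < off + 40 || frame[off + 6] != 6)	/* next header */
			return -1;
		return off + 40;		/* fixed IPv6 header */
	}
	return -1;
}

int main(void)
{
	/* Minimal IPv4 TCP frame: ethertype 0x0800, IHL 5, protocol 6. */
	uint8_t f[54] = { 0 };

	f[12] = 0x08; f[13] = 0x00;
	f[14] = 0x45;				/* version 4, IHL 5 */
	f[23] = 6;				/* IPPROTO_TCP */
	printf("TCP header at offset %d\n", tcp_header_offset(f, (int)sizeof(f)));
	return 0;
}
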
 
 
@@ -708,14 +714,14 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, fill_level, insert_ptr, misalign;
+	unsigned dma_len, fill_level, insert_ptr;
 	int q_space;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	while (1) {
 		if (unlikely(q_space-- <= 0)) {
@@ -731,7 +737,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				*(volatile unsigned *)&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = efx->type->txd_ring_mask - 1 - fill_level;
+			q_space = EFX_TXQ_MASK - 1 - fill_level;
 			if (unlikely(q_space-- <= 0)) {
 				*final_buffer = NULL;
 				return 1;
@@ -740,13 +746,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >
-				    efx->type->txd_ring_mask);
+				    EFX_TXQ_MASK);
 
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
@@ -757,12 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 		buffer->dma_addr = dma_addr;
 
-		/* Ensure we do not cross a boundary unsupported by H/W */
-		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
-
-		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
-		if (misalign && dma_len + misalign > 512)
-			dma_len = 512 - misalign;
+		dma_len = efx_max_tx_len(efx, dma_addr);
 
 		/* If there is enough space to send then do so */
 		if (dma_len >= len)
@@ -792,8 +793,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 
-	buffer = &tx_queue->buffer[tx_queue->insert_count &
-				   tx_queue->efx->type->txd_ring_mask];
+	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
 	efx_tsoh_free(tx_queue, buffer);
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +818,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->efx->type->txd_ring_mask];
+					   EFX_TXQ_MASK];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		buffer->len = 0;
@@ -850,7 +850,10 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 			  + PTR_DIFF(tcp_hdr(skb), skb->data));
 	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
 
-	st->ipv4_id = ntohs(ip_hdr(skb)->id);
+	if (st->protocol == htons(ETH_P_IP))
+		st->ipv4_id = ntohs(ip_hdr(skb)->id);
+	else
+		st->ipv4_id = 0;
 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -965,7 +968,6 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 				struct tso_state *st)
 {
 	struct efx_tso_header *tsoh;
-	struct iphdr *tsoh_iph;
 	struct tcphdr *tsoh_th;
 	unsigned ip_length;
 	u8 *header;
@@ -989,7 +991,6 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 
 	header = TSOH_BUFFER(tsoh);
 	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
-	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
 
 	/* Copy and update the headers. */
 	memcpy(header, skb->data, st->header_len);
@@ -1007,11 +1008,22 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 		tsoh_th->fin = tcp_hdr(skb)->fin;
 		tsoh_th->psh = tcp_hdr(skb)->psh;
 	}
-	tsoh_iph->tot_len = htons(ip_length);
 
-	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
-	tsoh_iph->id = htons(st->ipv4_id);
-	st->ipv4_id++;
+	if (st->protocol == htons(ETH_P_IP)) {
+		struct iphdr *tsoh_iph =
+			(struct iphdr *)(header + SKB_IPV4_OFF(skb));
+
+		tsoh_iph->tot_len = htons(ip_length);
+
+		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
+		tsoh_iph->id = htons(st->ipv4_id);
+		st->ipv4_id++;
+	} else {
+		struct ipv6hdr *tsoh_iph =
+			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+
+		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+	}
 
 	st->packet_space = skb_shinfo(skb)->gso_size;
 	++tx_queue->tso_packets;
@@ -1041,8 +1053,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	int frag_i, rc, rc2 = NETDEV_TX_OK;
 	struct tso_state state;
 
-	/* Verify TSO is safe - these checks should never fail. */
-	efx_tso_check_safe(skb);
+	/* Find the packet protocol and sanity-check it */
+	state.protocol = efx_tso_check_protocol(skb);
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -1092,14 +1104,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	}
 
 	/* Pass off to hardware */
-	falcon_push_buffers(tx_queue);
+	efx_nic_push_buffers(tx_queue);
 
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
 
  mem_err:
 	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
-	dev_kfree_skb_any((struct sk_buff *)skb);
+	dev_kfree_skb_any(skb);
 	goto unwind;
 
  stop:
@@ -1135,7 +1147,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	unsigned i;
 
 	if (tx_queue->buffer) {
-		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+		for (i = 0; i <= EFX_TXQ_MASK; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
 	}
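
With these changes tso_start_new_packet() patches up either an IPv4 or an IPv6 header for every output segment: the IPv4 branch rewrites tot_len and bumps the IP ID (the comment notes that Linux leaves gaps in the ID space for exactly this), while the IPv6 branch only rewrites payload_len, since the base IPv6 header carries no packet ID. The per-segment bookkeeping follows the usual TSO arithmetic; a standalone sketch with made-up numbers, not a transcription of the driver's state machine:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int gso_size = 1448;	/* TCP payload per segment */
	unsigned int payload  = 4000;	/* total TCP payload */
	uint32_t seq     = 1000;	/* initial TCP sequence number */
	uint16_t ipv4_id = 7;		/* initial IPv4 ID */
	unsigned int sent = 0, seg = 0;

	while (sent < payload) {
		unsigned int chunk = payload - sent;

		if (chunk > gso_size)
			chunk = gso_size;
		/* Sequence number advances by the bytes already sent;
		 * the IPv4 ID advances by one per segment (IPv6 skips this). */
		printf("segment %u: seq=%u id=%u payload=%u bytes\n",
		       seg, seq + sent, ipv4_id + seg, chunk);
		sent += chunk;
		seg++;
	}
	return 0;
}
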
 
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
deleted file mode 100644
index e3678962a5b4..000000000000
--- a/drivers/net/sfc/tx.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_TX_H
-#define EFX_TX_H
-
-#include "net_driver.h"
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-
-netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
-				      struct net_device *net_dev);
-void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
-
-#endif /* EFX_TX_H */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index c821c15445a0..acd9c734e483 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,9 @@
  */
 
 #define EFX_WORKAROUND_ALWAYS(efx) 1
-#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
+#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
+#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
+#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
 #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
 #define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
 				     (efx)->phy_type == PHY_TYPE_SFT9001B)
@@ -27,20 +29,22 @@
 #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
 /* Bit-bashed I2C reads cause performance drop */
 #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* TX pkt parser problem with <= 16 byte TXes */
-#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
 /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
  * or a PCIe error (bug 11028) */
 #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
 /* Transmit flow control may get disabled */
-#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
-/* Flush events can take a very long time to appear */
-#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
+#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
 /* Truncated IPv4 packets can confuse the TX packet parser */
-#define EFX_WORKAROUND_15592 EFX_WORKAROUND_ALWAYS
+#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
+/* Legacy ISR read can return zero once */
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA
+/* Legacy interrupt storm when interrupt fifo fills */
+#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
 
 /* Spurious parity errors in TSORT buffers */
 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+/* Unaligned read request >512 bytes after aligning may break TSORT */
+#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
 /* iSCSI parsing errors */
 #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
 /* RX events go missing */
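
The workaround macros above move from unconditional to revision-gated: each numbered workaround is now an alias for a predicate on the NIC revision (Falcon A only, Falcon A/B, or Siena), so callers keep writing if (EFX_WORKAROUND_xxxx(efx)) and the gating lives in one place. A toy sketch of the pattern with invented revision names, not the real efx_nic_rev() values:

#include <stdio.h>

/* Invented revisions, ordered oldest to newest. */
enum nic_rev { REV_FALCON_A1, REV_FALCON_B0, REV_SIENA_A0 };

struct nic { enum nic_rev rev; };

/* Revision predicates in the same style as workarounds.h. */
#define WORKAROUND_FALCON_A(n)  ((n)->rev <= REV_FALCON_A1)
#define WORKAROUND_FALCON_AB(n) ((n)->rev <= REV_FALCON_B0)
#define WORKAROUND_SIENA(n)     ((n)->rev == REV_SIENA_A0)

/* Each numbered workaround is an alias for one predicate. */
#define WORKAROUND_5391  WORKAROUND_FALCON_A	/* unaligned TX reads */
#define WORKAROUND_15783 WORKAROUND_SIENA	/* legacy ISR reads zero */

int main(void)
{
	struct nic falcon_b0 = { REV_FALCON_B0 };
	struct nic siena     = { REV_SIENA_A0 };

	printf("Falcon B0: 5391=%d 15783=%d\n",
	       WORKAROUND_5391(&falcon_b0), WORKAROUND_15783(&falcon_b0));
	printf("Siena:     5391=%d 15783=%d\n",
	       WORKAROUND_5391(&siena), WORKAROUND_15783(&siena));
	return 0;
}
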
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index f4dfd1f679a9..6b364a6c6c60 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -365,11 +365,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 					}
 					skb_reserve(newskb, 2);
 				} else {
-					skb = netdev_alloc_skb(dev, len + 2);
-					if (skb) {
-						skb_reserve(skb, 2);
+					skb = netdev_alloc_skb_ip_align(dev, len);
+					if (skb)
 						skb_copy_to_linear_data(skb, rd->skb->data, len);
-					}
+
 					newskb = rd->skb;
 				}
 memory_squeeze:
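
This hunk (and similar ones later in sis190, skge and sky2) replaces the open-coded netdev_alloc_skb(dev, len + 2) plus skb_reserve(skb, 2) with netdev_alloc_skb_ip_align(dev, len). The two reserved bytes are NET_IP_ALIGN: with a 14-byte Ethernet header, shifting the start of the frame by 2 puts the IP header on a 4-byte boundary. The offset arithmetic, as a standalone sketch:

#include <stdio.h>

#define ETH_HLEN     14	/* Ethernet header length */
#define NET_IP_ALIGN 2	/* default on most architectures */

int main(void)
{
	unsigned int headroom[] = { 0, NET_IP_ALIGN };

	for (int i = 0; i < 2; i++) {
		unsigned int ip_off = headroom[i] + ETH_HLEN;

		/* Assume the allocator returns 4-byte-aligned buffers. */
		printf("headroom %u: IP header at offset %u (%s)\n",
		       headroom[i], ip_off,
		       (ip_off % 4) ? "misaligned" : "4-byte aligned");
	}
	return 0;
}
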
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 528b912a4b0d..c88bc1013047 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -30,6 +30,7 @@
 #include <linux/phy.h>
 #include <linux/cache.h>
 #include <linux/io.h>
+#include <linux/pm_runtime.h>
 #include <asm/cacheflush.h>
 
 #include "sh_eth.h"
@@ -299,16 +300,20 @@ static void update_mac_address(struct net_device *ndev)
  * When you want use this device, you must set MAC address in bootloader.
  *
  */
-static void read_mac_address(struct net_device *ndev)
+static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 {
 	u32 ioaddr = ndev->base_addr;
 
-	ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
-	ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
-	ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
-	ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
-	ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
-	ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
+		memcpy(ndev->dev_addr, mac, 6);
+	} else {
+		ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
+		ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
+		ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
+		ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
+		ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
+		ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+	}
 }
 
 struct bb_info {
@@ -1009,7 +1014,9 @@ static int sh_eth_open(struct net_device *ndev)
 	int ret = 0;
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 
-	ret = request_irq(ndev->irq, &sh_eth_interrupt,
+	pm_runtime_get_sync(&mdp->pdev->dev);
+
+	ret = request_irq(ndev->irq, sh_eth_interrupt,
 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
 				IRQF_SHARED,
 #else
@@ -1045,6 +1052,7 @@ static int sh_eth_open(struct net_device *ndev)
 
 out_free_irq:
 	free_irq(ndev->irq, ndev);
+	pm_runtime_put_sync(&mdp->pdev->dev);
 	return ret;
 }
 
@@ -1176,6 +1184,8 @@ static int sh_eth_close(struct net_device *ndev)
 	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
 	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
 
+	pm_runtime_put_sync(&mdp->pdev->dev);
+
 	return 0;
 }
 
@@ -1184,6 +1194,8 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	u32 ioaddr = ndev->base_addr;
 
+	pm_runtime_get_sync(&mdp->pdev->dev);
+
 	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
 	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
 	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
@@ -1199,6 +1211,8 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
 	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */
 #endif
+	pm_runtime_put_sync(&mdp->pdev->dev);
+
 	return &mdp->stats;
 }
 
@@ -1407,6 +1421,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	mdp = netdev_priv(ndev);
 	spin_lock_init(&mdp->lock);
+	mdp->pdev = pdev;
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_resume(&pdev->dev);
 
 	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
 	/* get PHY ID */
@@ -1428,7 +1445,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	mdp->post_fw = POST_FW >> (devno << 1);
 
 	/* read and set MAC address */
-	read_mac_address(ndev);
+	read_mac_address(ndev, pd->mac_addr);
 
 	/* First device only init */
 	if (!devno) {
@@ -1482,18 +1499,37 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
 	sh_mdio_release(ndev);
 	unregister_netdev(ndev);
 	flush_scheduled_work();
-
+	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
 
+static int sh_eth_runtime_nop(struct device *dev)
+{
+	/*
+	 * Runtime PM callback shared between ->runtime_suspend()
+	 * and ->runtime_resume(). Simply returns success.
+	 *
+	 * This driver re-initializes all registers after
+	 * pm_runtime_get_sync() anyway so there is no need
+	 * to save and restore registers here.
+	 */
+	return 0;
+}
+
+static struct dev_pm_ops sh_eth_dev_pm_ops = {
+	.runtime_suspend = sh_eth_runtime_nop,
+	.runtime_resume = sh_eth_runtime_nop,
+};
+
 static struct platform_driver sh_eth_driver = {
 	.probe = sh_eth_drv_probe,
 	.remove = sh_eth_drv_remove,
 	.driver = {
 		   .name = CARDNAME,
+		   .pm = &sh_eth_dev_pm_ops,
 	},
 };
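
read_mac_address() now takes the MAC address supplied through platform data and only falls back to the MAHR/MALR registers when that address is all zeros. The register unpacking is straightforward byte extraction from two 32-bit values; a standalone sketch with made-up register contents:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int mac_is_set(const unsigned char *mac)
{
	static const unsigned char zero[6];

	return memcmp(mac, zero, 6) != 0;
}

/* MAHR holds the high four bytes, MALR the low two, as in the patch. */
static void mac_from_regs(unsigned char *mac, uint32_t mahr, uint32_t malr)
{
	mac[0] = mahr >> 24;
	mac[1] = (mahr >> 16) & 0xff;
	mac[2] = (mahr >> 8) & 0xff;
	mac[3] = mahr & 0xff;
	mac[4] = (malr >> 8) & 0xff;
	mac[5] = malr & 0xff;
}

int main(void)
{
	unsigned char platform_mac[6] = { 0 };	/* nothing provided */
	unsigned char mac[6];

	if (mac_is_set(platform_mac))
		memcpy(mac, platform_mac, 6);
	else
		mac_from_regs(mac, 0x00123456, 0x0000789a);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
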
 
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index ba151f86ae7b..8b47763958f2 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -703,6 +703,7 @@ struct sh_eth_cpu_data {
 };
 
 struct sh_eth_private {
+	struct platform_device *pdev;
 	struct sh_eth_cpu_data *cd;
 	dma_addr_t rx_desc_dma;
 	dma_addr_t tx_desc_dma;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 7cc9898f4e00..31233b4c44a0 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -536,13 +536,12 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
 	if (pkt_size >= rx_copybreak)
 		goto out;
 
-	skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 	if (!skb)
 		goto out;
 
 	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
 				PCI_DMA_FROMDEVICE);
-	skb_reserve(skb, 2);
 	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
 	*sk_buff = skb;
 	done = true;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index c072f7f36acf..9a12d88ac2d9 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1016,7 +1016,7 @@ sis900_open(struct net_device *net_dev)
 	/* Equalizer workaround Rule */
 	sis630_set_eq(net_dev, sis_priv->chipset_rev);
 
-	ret = request_irq(net_dev->irq, &sis900_interrupt, IRQF_SHARED,
+	ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED,
 						net_dev->name, net_dev);
 	if (ret)
 		return ret;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 8f5414348e86..379a3dc00163 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -238,8 +238,8 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
 
-	if ((wol->wolopts & ~wol_supported(hw))
-	    || !device_can_wakeup(&hw->pdev->dev))
+	if ((wol->wolopts & ~wol_supported(hw)) ||
+	    !device_can_wakeup(&hw->pdev->dev))
 		return -EOPNOTSUPP;
 
 	skge->wol = wol->wolopts;
@@ -576,9 +576,10 @@ static void skge_get_pauseparam(struct net_device *dev,
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC)
-		|| (skge->flow_control == FLOW_MODE_SYM_OR_REM);
-	ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND);
+	ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
+			  (skge->flow_control == FLOW_MODE_SYM_OR_REM));
+	ecmd->tx_pause = (ecmd->rx_pause ||
+			  (skge->flow_control == FLOW_MODE_LOC_SEND));
 
 	ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
 }
@@ -2779,8 +2780,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 		/* This seems backwards, but it is what the sk98lin
 		 * does.  Looks like hardware is wrong?
 		 */
-		if (ipip_hdr(skb)->protocol == IPPROTO_UDP
-	            && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
+		if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
+	            hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
 			control = BMU_TCP_CHECK;
 		else
 			control = BMU_UDP_CHECK;
@@ -2948,8 +2949,8 @@ static void genesis_set_multicast(struct net_device *dev)
 	else {
 		memset(filter, 0, sizeof(filter));
 
-		if (skge->flow_status == FLOW_STAT_REM_SEND
-		    || skge->flow_status == FLOW_STAT_SYMMETRIC)
+		if (skge->flow_status == FLOW_STAT_REM_SEND ||
+		    skge->flow_status == FLOW_STAT_SYMMETRIC)
 			genesis_add_filter(filter, pause_mc_addr);
 
 		for (i = 0; list && i < count; i++, list = list->next)
@@ -2972,8 +2973,8 @@ static void yukon_set_multicast(struct net_device *dev)
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 	struct dev_mc_list *list = dev->mc_list;
-	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND
-			|| skge->flow_status == FLOW_STAT_SYMMETRIC);
+	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
+			skge->flow_status == FLOW_STAT_SYMMETRIC);
 	u16 reg;
 	u8 filter[8];
 
@@ -3071,11 +3072,10 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		skb = netdev_alloc_skb(dev, len + 2);
+		skb = netdev_alloc_skb_ip_align(dev, len);
 		if (!skb)
 			goto resubmit;
 
-		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    pci_unmap_addr(e, mapaddr),
 					    len, PCI_DMA_FROMDEVICE);
@@ -3086,11 +3086,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
-		nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
+
+		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
 		if (!nskb)
 			goto resubmit;
 
-		skb_reserve(nskb, NET_IP_ALIGN);
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
@@ -3948,7 +3948,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	hw->pdev = pdev;
 	spin_lock_init(&hw->hw_lock);
 	spin_lock_init(&hw->phy_lock);
-	tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw);
+	tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 6a10d7ba5877..3943d89afb2b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.25"
+#define DRV_VERSION		"1.26"
 #define PFX			DRV_NAME " "
 
 /*
@@ -102,6 +102,7 @@ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
@@ -139,6 +140,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
 	{ 0 }
 };
 
@@ -372,8 +374,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
 
 			/* downshift on PHY 88E1112 and 88E1149 is changed */
-			if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED)
-			    && (hw->flags & SKY2_HW_NEWER_PHY)) {
+			if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+			     (hw->flags & SKY2_HW_NEWER_PHY)) {
 				/* set downshift counter to 3x and enable downshift */
 				ctrl &= ~PHY_M_PC_DSC_MSK;
 				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
@@ -602,13 +604,23 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 		/* apply workaround for integrated resistors calibration */
 		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
 		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
+	} else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+		/* apply fixes in PHY AFE */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+		/* apply RDAC termination workaround */
+		gm_phy_write(hw, port, 24, 0x2800);
+		gm_phy_write(hw, port, 23, 0x2001);
+
+		/* set page register back to 0 */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 	} else if (hw->chip_id != CHIP_ID_YUKON_EX &&
 		   hw->chip_id < CHIP_ID_YUKON_SUPR) {
 		/* no effect on Yukon-XL */
 		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
 
-		if ( !(sky2->flags & SKY2_FLAG_AUTO_SPEED)
-		     || sky2->speed == SPEED_100) {
+		if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
+		    sky2->speed == SPEED_100) {
 			/* turn on 100 Mbps LED (LED_LINK100) */
 			ledover |= PHY_M_LED_MO_100(MO_LED_ON);
 		}
@@ -786,8 +798,7 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
 
 	if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
 	      hw->chip_rev != CHIP_REV_YU_EX_A0) ||
-	     hw->chip_id == CHIP_ID_YUKON_FE_P ||
-	     hw->chip_id == CHIP_ID_YUKON_SUPR) {
+	     hw->chip_id >= CHIP_ID_YUKON_FE_P) {
 		/* Yukon-Extreme B0 and further Extreme devices */
 		/* enable Store & Forward mode for TX */
 
@@ -925,8 +936,14 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 
 	/* On chips without ram buffer, pause is controled by MAC level */
 	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
-		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
-		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
+		/* Pause threshold is scaled by 8 in bytes */
+		if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+		    hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			reg = 1568 / 8;
+		else
+			reg = 1024 / 8;
+		sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
+		sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
 
 		sky2_set_tx_stfwd(hw, port);
 	}
@@ -1336,8 +1353,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
 	/* These chips have no ram buffer?
 	 * MAC Rx RAM Read is controlled by hardware */
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
-	    (hw->chip_rev == CHIP_REV_YU_EC_U_A1
-	     || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
+	    (hw->chip_rev == CHIP_REV_YU_EC_U_A1 ||
+	     hw->chip_rev == CHIP_REV_YU_EC_U_B0))
 		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
 
 	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -1397,6 +1414,31 @@ static int sky2_rx_start(struct sky2_port *sky2)
 
 	/* Tell chip about available buffers */
 	sky2_rx_update(sky2, rxq);
+
+	if (hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
+		/*
+		 * Disable flushing of non ASF packets;
+		 * must be done after initializing the BMUs;
+		 * drivers without ASF support should do this too, otherwise
+		 * it may happen that they cannot run on ASF devices;
+		 * remember that the MAC FIFO isn't reset during initialization.
+		 */
+		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
+	}
+
+	if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
+		/* Enable RX Home Address & Routing Header checksum fix */
+		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
+			     RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
+
+		/* Enable TX Home Address & Routing Header checksum fix */
+		sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
+			     TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
+	}
+
+
+
 	return 0;
 nomem:
 	sky2_rx_clean(sky2);
@@ -1518,8 +1560,8 @@ static int sky2_up(struct net_device *dev)
 		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
 
 	/* Set almost empty threshold */
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U
-	    && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+	    hw->chip_rev == CHIP_REV_YU_EC_U_A0)
 		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
 
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
@@ -1865,8 +1907,8 @@ static int sky2_down(struct net_device *dev)
 	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
 
 	/* Workaround shared GMAC reset */
-	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
-	      && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
+	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
+	      port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
 		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
 
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
@@ -2043,8 +2085,8 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
 			sky2->flow_status = FC_TX;
 	}
 
-	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
-	    && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
+	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
+	    !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
 		sky2->flow_status = FC_NONE;
 
 	if (sky2->flow_status & FC_TX)
@@ -2096,6 +2138,25 @@ out:
 	spin_unlock(&sky2->phy_lock);
 }
 
+/* Special quick link interrupt (Yukon-2 Optima only) */
+static void sky2_qlink_intr(struct sky2_hw *hw)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
+	u32 imask;
+	u16 phy;
+
+	/* disable irq */
+	imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~Y2_IS_PHY_QLNK;
+	sky2_write32(hw, B0_IMSK, imask);
+
+	/* reset PHY Link Detect */
+	phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+	sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+
+	sky2_link_up(sky2);
+}
+
 /* Transmit timeout is only called if we are running, carrier is up
  * and tx queue is full (stopped).
  */
@@ -2191,9 +2252,8 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 {
 	struct sk_buff *skb;
 
-	skb = netdev_alloc_skb(sky2->netdev, length + 2);
+	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
 	if (likely(skb)) {
-		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
 					    length, PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(re->skb, skb->data, length);
@@ -2766,6 +2826,9 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
 	if (status & Y2_IS_IRQ_PHY2)
 		sky2_phy_intr(hw, 1);
 
+	if (status & Y2_IS_PHY_QLNK)
+		sky2_qlink_intr(hw);
+
 	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
 		work_done += sky2_status_intr(hw, work_limit - work_done, idx);
 
@@ -2815,6 +2878,7 @@ static u32 sky2_mhz(const struct sky2_hw *hw)
 	case CHIP_ID_YUKON_EX:
 	case CHIP_ID_YUKON_SUPR:
 	case CHIP_ID_YUKON_UL_2:
+	case CHIP_ID_YUKON_OPT:
 		return 125;
 
 	case CHIP_ID_YUKON_FE:
@@ -2904,6 +2968,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 		break;
 
 	case CHIP_ID_YUKON_UL_2:
+	case CHIP_ID_YUKON_OPT:
 		hw->flags = SKY2_HW_GIGABIT
 			| SKY2_HW_ADV_POWER_CTL;
 		break;
@@ -2986,6 +3051,52 @@ static void sky2_reset(struct sky2_hw *hw)
 			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
 				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
 				     | GMC_BYP_RETR_ON);
+
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
+		/* enable MACSec clock gating */
+		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_OPT) {
+		u16 reg;
+		u32 msk;
+
+		if (hw->chip_rev == 0) {
+			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
+			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
+
+			/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
+			reg = 10;
+		} else {
+			/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
+			reg = 3;
+		}
+
+		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+
+		/* reset PHY Link Detect */
+		sky2_pci_write16(hw, PSM_CONFIG_REG4,
+				 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
+		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
+
+
+		/* enable PHY Quick Link */
+		msk = sky2_read32(hw, B0_IMSK);
+		msk |= Y2_IS_PHY_QLNK;
+		sky2_write32(hw, B0_IMSK, msk);
+
+		/* check if PSMv2 was running before */
+		reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
+		if (reg & PCI_EXP_LNKCTL_ASPMC) {
+			int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+			/* restore the PCIe Link Control register */
+			sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
+		}
+
+		/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+		sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
 	}
 
 	/* Clear I2C IRQ noise */
@@ -3133,8 +3244,8 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 
-	if ((wol->wolopts & ~sky2_wol_supported(sky2->hw))
-	    || !device_can_wakeup(&hw->pdev->dev))
+	if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
+	    !device_can_wakeup(&hw->pdev->dev))
 		return -EOPNOTSUPP;
 
 	sky2->wol = wol->wolopts;
@@ -4406,9 +4517,11 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
 		"FE+",		/* 0xb8 */
 		"Supreme",	/* 0xb9 */
 		"UL 2",		/* 0xba */
+		"Unknown",	/* 0xbb */
+		"Optima",	/* 0xbc */
 	};
 
-	if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_UL_2)
+	if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
 		strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
 	else
 		snprintf(buf, sz, "(chip %#x)", chipid);
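
The Yukon-2 Optima support added above programs a PHY link-detect timer in PSM_CONFIG_REG4: the timer field sits in bits 7:4 (PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE is 4 in the sky2.h hunk below), and the in-code comments suggest the hardware counts field-value-plus-one intervals of 100 ms, since 10 is described as 1.1 s and 3 as 0.4 s. The value is first written together with the reset bit and then written again without it. A small arithmetic sketch of the register values involved, inferred from the patch comments rather than a datasheet:

#include <stdio.h>
#include <stdint.h>

#define TIMER_PHY_LINK_DETECT_BASE 4		/* field starts at bit 4 */
#define RST_PHY_LINK_DETECT        (1 << 0)	/* reset GPHY link detect */

static void show(const char *chip, unsigned int field)
{
	uint16_t reg = field << TIMER_PHY_LINK_DETECT_BASE;

	printf("%s: field=%u, roughly %u ms\n", chip, field, (field + 1) * 100);
	printf("  first write:  0x%04x (timer value plus reset bit)\n",
	       reg | RST_PHY_LINK_DETECT);
	printf("  second write: 0x%04x (timer value only)\n", reg);
}

int main(void)
{
	show("Optima rev 0", 10);	/* 1.1 s per the patch comment */
	show("later revs", 3);		/* 0.4 s per the patch comment */
	return 0;
}
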
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ed54129698b4..365d79c7d834 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -16,6 +16,13 @@ enum {
 	PCI_DEV_REG5    = 0x88,
 	PCI_CFG_REG_0	= 0x90,
 	PCI_CFG_REG_1	= 0x94,
+
+	PSM_CONFIG_REG0  = 0x98,
+	PSM_CONFIG_REG1	 = 0x9C,
+	PSM_CONFIG_REG2  = 0x160,
+	PSM_CONFIG_REG3  = 0x164,
+	PSM_CONFIG_REG4  = 0x168,
+
 };
 
 /* Yukon-2 */
@@ -48,6 +55,37 @@ enum pci_dev_reg_2 {
 	PCI_USEDATA64	= 1<<0,		/* Use 64Bit Data bus ext */
 };
 
+/*	PCI_OUR_REG_3		32 bit	Our Register 3 (Yukon-ECU only) */
+enum pci_dev_reg_3 {
+	P_CLK_ASF_REGS_DIS	= 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
+	P_CLK_COR_REGS_D0_DIS	= 1<<17,/* Disable Clock Core Regs D0 */
+	P_CLK_MACSEC_DIS	= 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
+	P_CLK_PCI_REGS_D0_DIS	= 1<<16,/* Disable Clock PCI  Regs D0 */
+	P_CLK_COR_YTB_ARB_DIS	= 1<<15,/* Disable Clock YTB  Arbiter */
+	P_CLK_MAC_LNK1_D3_DIS	= 1<<14,/* Disable Clock MAC  Link1 D3 */
+	P_CLK_COR_LNK1_D0_DIS	= 1<<13,/* Disable Clock Core Link1 D0 */
+	P_CLK_MAC_LNK1_D0_DIS	= 1<<12,/* Disable Clock MAC  Link1 D0 */
+	P_CLK_COR_LNK1_D3_DIS	= 1<<11,/* Disable Clock Core Link1 D3 */
+	P_CLK_PCI_MST_ARB_DIS	= 1<<10,/* Disable Clock PCI  Master Arb. */
+	P_CLK_COR_REGS_D3_DIS	= 1<<9,	/* Disable Clock Core Regs D3 */
+	P_CLK_PCI_REGS_D3_DIS	= 1<<8,	/* Disable Clock PCI  Regs D3 */
+	P_CLK_REF_LNK1_GM_DIS	= 1<<7,	/* Disable Clock Ref. Link1 GMAC */
+	P_CLK_COR_LNK1_GM_DIS	= 1<<6,	/* Disable Clock Core Link1 GMAC */
+	P_CLK_PCI_COMMON_DIS	= 1<<5,	/* Disable Clock PCI  Common */
+	P_CLK_COR_COMMON_DIS	= 1<<4,	/* Disable Clock Core Common */
+	P_CLK_PCI_LNK1_BMU_DIS	= 1<<3,	/* Disable Clock PCI  Link1 BMU */
+	P_CLK_COR_LNK1_BMU_DIS	= 1<<2,	/* Disable Clock Core Link1 BMU */
+	P_CLK_PCI_LNK1_BIU_DIS	= 1<<1,	/* Disable Clock PCI  Link1 BIU */
+	P_CLK_COR_LNK1_BIU_DIS	= 1<<0,	/* Disable Clock Core Link1 BIU */
+	PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
+				    P_CLK_COR_REGS_D0_DIS |
+				    P_CLK_COR_LNK1_D0_DIS |
+				    P_CLK_MAC_LNK1_D0_DIS |
+				    P_CLK_PCI_MST_ARB_DIS |
+				    P_CLK_COR_COMMON_DIS |
+				    P_CLK_COR_LNK1_BMU_DIS,
+};
+
 /*	PCI_OUR_REG_4		32 bit	Our Register 4 (Yukon-ECU only) */
 enum pci_dev_reg_4 {
 				/* (Link Training & Status State Machine) */
@@ -114,7 +152,7 @@ enum pci_dev_reg_5 {
 				     P_GAT_PCIE_RX_EL_IDLE,
 };
 
-#/*	PCI_CFG_REG_1			32 bit	Config Register 1 (Yukon-Ext only) */
+/*	PCI_CFG_REG_1			32 bit	Config Register 1 (Yukon-Ext only) */
 enum pci_cfg_reg1 {
 	P_CF1_DIS_REL_EVT_RST	= 1<<24, /* Dis. Rel. Event during PCIE reset */
 										/* Bit 23..21: Release Clock on Event */
@@ -145,6 +183,72 @@ enum pci_cfg_reg1 {
 					P_CF1_ENA_TXBMU_WR_IDLE,
 };
 
+/* Yukon-Optima */
+enum {
+	PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31,   /* AC Present Status */
+
+	PSM_CONFIG_REG1_PTP_CLK_SEL	  = 1<<29,   /* PTP Clock Select */
+	PSM_CONFIG_REG1_PTP_MODE	  = 1<<28,   /* PTP Mode */
+
+	PSM_CONFIG_REG1_MUX_PHY_LINK	  = 1<<27,   /* PHY Energy Detect Event */
+
+	PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26,  /* Enable LED_DUPLEX for ac_present */
+	PSM_CONFIG_REG1_EN_PCIE_TIMER	  = 1<<25,    /* Enable PCIe Timer */
+	PSM_CONFIG_REG1_EN_SPU_TIMER	  = 1<<24,    /* Enable SPU Timer */
+	PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23,  /* AC Present Polarity */
+
+	PSM_CONFIG_REG1_EN_AC_PRESENT	  = 1<<21,    /* Enable AC Present */
+
+	PSM_CONFIG_REG1_EN_GPHY_INT_PSM	= 1<<20,      /* Enable GPHY INT for PSM */
+	PSM_CONFIG_REG1_DIS_PSM_TIMER	= 1<<19,      /* Disable PSM Timer */
+};
+
+/* Yukon-Supreme */
+enum {
+	PSM_CONFIG_REG1_GPHY_ENERGY_STS	= 1<<31, /* GPHY Energy Detect Status */
+
+	PSM_CONFIG_REG1_UART_MODE_MSK	= 3<<29, /* UART_Mode */
+	PSM_CONFIG_REG1_CLK_RUN_ASF	= 1<<28, /* Enable Clock Free Running for ASF Subsystem */
+	PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
+	PSM_CONFIG_REG1_VAUX_ONE	= 1<<26, /* Tie internal Vaux to 1'b1 */
+	PSM_CONFIG_REG1_UART_FC_RI_VAL	= 1<<25, /* Default value for UART_RI_n */
+	PSM_CONFIG_REG1_UART_FC_DCD_VAL	= 1<<24, /* Default value for UART_DCD_n */
+	PSM_CONFIG_REG1_UART_FC_DSR_VAL	= 1<<23, /* Default value for UART_DSR_n */
+	PSM_CONFIG_REG1_UART_FC_CTS_VAL	= 1<<22, /* Default value for UART_CTS_n */
+	PSM_CONFIG_REG1_LATCH_VAUX	= 1<<21, /* Enable Latch current Vaux_avlbl */
+	PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
+	PSM_CONFIG_REG1_UART_RST	= 1<<19, /* UART_RST */
+	PSM_CONFIG_REG1_PSM_PCIE_L1_POL	= 1<<18, /* PCIE L1 Event Polarity for PSM */
+	PSM_CONFIG_REG1_TIMER_STAT	= 1<<17, /* PSM Timer Status */
+	PSM_CONFIG_REG1_GPHY_INT	= 1<<16, /* GPHY INT Status */
+	PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
+	PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
+	PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ	= 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
+	PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK	= 1<<12, /* Disable CLK_GATE control snd_task */
+	PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA	= 1<<11, /* Disable flip-flop chain for sndmsg_inta */
+
+	PSM_CONFIG_REG1_DIS_LOADER	= 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_DO_PWDN		= 1<<8, /* Do Power Down, Start PSM Scheme */
+	PSM_CONFIG_REG1_DIS_PIG		= 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_DIS_PERST	= 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_EN_REG18_PD	= 1<<5, /* Enable REG18 Power Down for PSM */
+	PSM_CONFIG_REG1_EN_PSM_LOAD	= 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_EN_PSM_HOT_RST	= 1<<3, /* Enable PCIe Hot Reset for PSM */
+	PSM_CONFIG_REG1_EN_PSM_PERST	= 1<<2, /* Enable PCIe Reset Event for PSM */
+	PSM_CONFIG_REG1_EN_PSM_PCIE_L1	= 1<<1, /* Enable PCIe L1 Event for PSM */
+	PSM_CONFIG_REG1_EN_PSM		= 1<<0, /* Enable PSM Scheme */
+};
+
+/*	PSM_CONFIG_REG4				0x0168	PSM Config Register 4 */
+enum {
+						/* PHY Link Detect Timer */
+	PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
+	PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
+
+	PSM_CONFIG_REG4_DEBUG_TIMER	    = 1<<1, /* Debug Timer */
+	PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
+};
+
 
 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -197,6 +301,9 @@ enum csr_regs {
 	B2_I2C_IRQ	= 0x0168,
 	B2_I2C_SW	= 0x016c,
 
+	Y2_PEX_PHY_DATA = 0x0170,
+	Y2_PEX_PHY_ADDR = 0x0172,
+
 	B3_RAM_ADDR	= 0x0180,
 	B3_RAM_DATA_LO	= 0x0184,
 	B3_RAM_DATA_HI	= 0x0188,
@@ -317,6 +424,10 @@ enum {
 	Y2_IS_CHK_TXS2	= 1<<9,		/* Descriptor error TXS 2 */
 	Y2_IS_CHK_TXA2	= 1<<8,		/* Descriptor error TXA 2 */
 
+	Y2_IS_PSM_ACK	= 1<<7,		/* PSM Acknowledge (Yukon-Optima only) */
+	Y2_IS_PTP_TIST	= 1<<6,		/* PTP Time Stamp (Yukon-Optima only) */
+	Y2_IS_PHY_QLNK	= 1<<5,		/* PHY Quick Link (Yukon-Optima only) */
+
 	Y2_IS_IRQ_PHY1	= 1<<4,		/* Interrupt from PHY 1 */
 	Y2_IS_IRQ_MAC1	= 1<<3,		/* Interrupt from MAC 1 */
 	Y2_IS_CHK_RX1	= 1<<2,		/* Descriptor error Rx 1 */
@@ -435,6 +546,7 @@ enum {
  	CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
 	CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
 	CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
+	CHIP_ID_YUKON_OPT  = 0xbc, /* YUKON-2 Optima */
 };
 enum yukon_ec_rev {
 	CHIP_REV_YU_EC_A1    = 0,  /* Chip Rev. for Yukon-EC A1/A0 */
@@ -459,6 +571,8 @@ enum yukon_ex_rev {
 };
 enum yukon_supr_rev {
 	CHIP_REV_YU_SU_A0    = 0,
+	CHIP_REV_YU_SU_B0    = 1,
+	CHIP_REV_YU_SU_B1    = 3,
 };
 
 
@@ -513,6 +627,12 @@ enum {
 	TIM_T_STEP	= 1<<0,	/* Test step */
 };
 
+/*	Y2_PEX_PHY_ADDR/DATA		PEX PHY address and data reg  (Yukon-2 only) */
+enum {
+	PEX_RD_ACCESS	= 1<<31, /* Access Mode Read = 1, Write = 0 */
+	PEX_DB_ACCESS	= 1<<30, /* Access to debug register */
+};
+
 /*	B3_RAM_ADDR		32 bit	RAM Address, to read or write */
 					/* Bit 31..19:	reserved */
 #define RAM_ADR_RAN	0x0007ffffL	/* Bit 18.. 0:	RAM Address Range */
@@ -688,10 +808,11 @@ enum {
 	RX_GMF_AF_THR	= 0x0c44,/* 32 bit	Rx GMAC FIFO Almost Full Thresh. */
 	RX_GMF_CTRL_T	= 0x0c48,/* 32 bit	Rx GMAC FIFO Control/Test */
 	RX_GMF_FL_MSK	= 0x0c4c,/* 32 bit	Rx GMAC FIFO Flush Mask */
-	RX_GMF_FL_THR	= 0x0c50,/* 32 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_FL_THR	= 0x0c50,/* 16 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_FL_CTRL	= 0x0c52,/* 16 bit	Rx GMAC FIFO Flush Control */
 	RX_GMF_TR_THR	= 0x0c54,/* 32 bit	Rx Truncation Threshold (Yukon-2) */
-	RX_GMF_UP_THR	= 0x0c58,/*  8 bit	Rx Upper Pause Thr (Yukon-EC_U) */
-	RX_GMF_LP_THR	= 0x0c5a,/*  8 bit	Rx Lower Pause Thr (Yukon-EC_U) */
+	RX_GMF_UP_THR	= 0x0c58,/* 16 bit	Rx Upper Pause Thr (Yukon-EC_U) */
+	RX_GMF_LP_THR	= 0x0c5a,/* 16 bit	Rx Lower Pause Thr (Yukon-EC_U) */
 	RX_GMF_VLAN	= 0x0c5c,/* 32 bit	Rx VLAN Type Register (Yukon-2) */
 	RX_GMF_WP	= 0x0c60,/* 32 bit	Rx GMAC FIFO Write Pointer */
 
@@ -754,6 +875,42 @@ enum {
 	BMU_TX_CLR_IRQ_TCP	= 1<<11, /* Clear IRQ on TCP segment length mismatch */
 };
 
+/*	TBMU_TEST			0x06B8	Transmit BMU Test Register */
+enum {
+	TBMU_TEST_BMU_TX_CHK_AUTO_OFF		= 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
+	TBMU_TEST_BMU_TX_CHK_AUTO_ON		= 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_EN		= 1<<29, /* Home Address Padding FIX1 Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_DIS		= 1<<28, /* Home Address Padding FIX1 Disable */
+	TBMU_TEST_ROUTING_ADD_FIX_EN		= 1<<27, /* Routing Address Fix Enable */
+	TBMU_TEST_ROUTING_ADD_FIX_DIS		= 1<<26, /* Routing Address Fix Disable */
+	TBMU_TEST_HOME_ADD_FIX_EN		= 1<<25, /* Home address checksum fix enable */
+	TBMU_TEST_HOME_ADD_FIX_DIS		= 1<<24, /* Home address checksum fix disable */
+
+	TBMU_TEST_TEST_RSPTR_ON			= 1<<22, /* Testmode Shadow Read Ptr On */
+	TBMU_TEST_TEST_RSPTR_OFF		= 1<<21, /* Testmode Shadow Read Ptr Off */
+	TBMU_TEST_TESTSTEP_RSPTR		= 1<<20, /* Teststep Shadow Read Ptr */
+
+	TBMU_TEST_TEST_RPTR_ON			= 1<<18, /* Testmode Read Ptr On */
+	TBMU_TEST_TEST_RPTR_OFF			= 1<<17, /* Testmode Read Ptr Off */
+	TBMU_TEST_TESTSTEP_RPTR			= 1<<16, /* Teststep Read Ptr */
+
+	TBMU_TEST_TEST_WSPTR_ON			= 1<<14, /* Testmode Shadow Write Ptr On */
+	TBMU_TEST_TEST_WSPTR_OFF		= 1<<13, /* Testmode Shadow Write Ptr Off */
+	TBMU_TEST_TESTSTEP_WSPTR		= 1<<12, /* Teststep Shadow Write Ptr */
+
+	TBMU_TEST_TEST_WPTR_ON			= 1<<10, /* Testmode Write Ptr On */
+	TBMU_TEST_TEST_WPTR_OFF			= 1<<9, /* Testmode Write Ptr Off */
+	TBMU_TEST_TESTSTEP_WPTR			= 1<<8, /* Teststep Write Ptr */
+
+	TBMU_TEST_TEST_REQ_NB_ON		= 1<<6, /* Testmode Req Nbytes/Addr On */
+	TBMU_TEST_TEST_REQ_NB_OFF		= 1<<5, /* Testmode Req Nbytes/Addr Off */
+	TBMU_TEST_TESTSTEP_REQ_NB		= 1<<4, /* Teststep Req Nbytes/Addr */
+
+	TBMU_TEST_TEST_DONE_IDX_ON		= 1<<2, /* Testmode Done Index On */
+	TBMU_TEST_TEST_DONE_IDX_OFF		= 1<<1, /* Testmode Done Index Off */
+	TBMU_TEST_TESTSTEP_DONE_IDX		= 1<<0,	/* Teststep Done Index */
+};
+
 /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
 /* PREF_UNIT_CTRL	32 bit	Prefetch Control register */
 enum {
@@ -1674,6 +1831,12 @@ enum {
 
 /*	RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */
 enum {
+	RX_GCLKMAC_ENA	= 1<<31,	/* RX MAC Clock Gating Enable */
+	RX_GCLKMAC_OFF	= 1<<30,
+
+	RX_STFW_DIS	= 1<<29,	/* RX Store and Forward Disable */
+	RX_STFW_ENA	= 1<<28,
+
 	RX_TRUNC_ON	= 1<<27,  	/* enable  packet truncation */
 	RX_TRUNC_OFF	= 1<<26, 	/* disable packet truncation */
 	RX_VLAN_STRIP_ON = 1<<25,	/* enable  VLAN stripping */
@@ -1711,6 +1874,20 @@ enum {
 	GMF_RX_CTRL_DEF	= GMF_OPER_ON | GMF_RX_F_FL_ON,
 };
 
+/*	RX_GMF_FL_CTRL	16 bit	Rx GMAC FIFO Flush Control (Yukon-Supreme) */
+enum {
+	RX_IPV6_SA_MOB_ENA	= 1<<9,	/* IPv6 SA Mobility Support Enable */
+	RX_IPV6_SA_MOB_DIS	= 1<<8,	/* IPv6 SA Mobility Support Disable */
+	RX_IPV6_DA_MOB_ENA	= 1<<7,	/* IPv6 DA Mobility Support Enable */
+	RX_IPV6_DA_MOB_DIS	= 1<<6,	/* IPv6 DA Mobility Support Disable */
+	RX_PTR_SYNCDLY_ENA	= 1<<5,	/* Pointers Delay Synch Enable */
+	RX_PTR_SYNCDLY_DIS	= 1<<4,	/* Pointers Delay Synch Disable */
+	RX_ASF_NEWFLAG_ENA	= 1<<3,	/* RX ASF Flag New Logic Enable */
+	RX_ASF_NEWFLAG_DIS	= 1<<2,	/* RX ASF Flag New Logic Disable */
+	RX_FLSH_MISSPKT_ENA	= 1<<1,	/* RX Flush Miss-Packet Enable */
+	RX_FLSH_MISSPKT_DIS	= 1<<0,	/* RX Flush Miss-Packet Disable */
+};
+
 /*	TX_GMF_EA		32 bit	Tx GMAC FIFO End Address */
 enum {
 	TX_DYN_WM_ENA	= 3,	/* Yukon-FE+ specific */
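
Most of the FIFO control and BMU test registers added above follow the Marvell convention of paired one-shot bits: writing the _ENA/_ON bit switches a feature on and writing the matching _DIS/_OFF bit switches it off again, so no read-modify-write of the register is needed. A minimal sketch for the new flush-control register; reg_write16() and hw are placeholders, not the driver's real accessor:

	/* reg_write16() is hypothetical; it stands in for a 16-bit register write */
	reg_write16(hw, RX_GMF_FL_CTRL, RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
	/* ... and later, switching the same features off again: */
	reg_write16(hw, RX_GMF_FL_CTRL, RX_IPV6_SA_MOB_DIS | RX_IPV6_DA_MOB_DIS);
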
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index fe3cebb984de..ba5bbc503446 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -80,6 +80,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/if_arp.h>
 #include <linux/if_slip.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include "slip.h"
@@ -955,8 +956,8 @@ static void slip_unesc(struct slip *sl, unsigned char s)
 			clear_bit(SLF_KEEPTEST, &sl->flags);
 #endif
 
-		if (!test_and_clear_bit(SLF_ERROR, &sl->flags)
-							&& (sl->rcount > 2))
+		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+		    (sl->rcount > 2))
 			sl_bump(sl);
 		clear_bit(SLF_ESCAPE, &sl->flags);
 		sl->rcount = 0;
@@ -1038,8 +1039,8 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
 			clear_bit(SLF_KEEPTEST, &sl->flags);
 #endif
 
-		if (!test_and_clear_bit(SLF_ERROR, &sl->flags)
-							&& (sl->rcount > 2))
+		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+		    (sl->rcount > 2))
 			sl_bump(sl);
 		sl->rcount = 0;
 		sl->xbits = 0;
@@ -1169,6 +1170,27 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static long slip_compat_ioctl(struct tty_struct *tty, struct file *file,
+					unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCGIFNAME:
+	case SIOCGIFENCAP:
+	case SIOCSIFENCAP:
+	case SIOCSIFHWADDR:
+	case SIOCSKEEPALIVE:
+	case SIOCGKEEPALIVE:
+	case SIOCSOUTFILL:
+	case SIOCGOUTFILL:
+		return slip_ioctl(tty, file, cmd,
+				  (unsigned long)compat_ptr(arg));
+	}
+
+	return -ENOIOCTLCMD;
+}
+#endif
+
 /* VSV changes start here */
 #ifdef CONFIG_SLIP_SMART
 /* function do_ioctl called from net/core/dev.c
@@ -1261,6 +1283,9 @@ static struct tty_ldisc_ops sl_ldisc = {
 	.close	 	= slip_close,
 	.hangup	 	= slip_hangup,
 	.ioctl		= slip_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= slip_compat_ioctl,
+#endif
 	.receive_buf	= slip_receive_buf,
 	.write_wakeup	= slip_write_wakeup,
 };
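
The compat handler only has to forward the commands whose argument is a pointer into 32-bit userspace; compat_ptr() widens that pointer so the native slip_ioctl() can use it unchanged, and returning -ENOIOCTLCMD for everything else lets the tty core apply its generic fallbacks. The same shape works for any line discipline; a sketch with hypothetical example_* names:

	static long example_compat_ioctl(struct tty_struct *tty, struct file *file,
					 unsigned int cmd, unsigned long arg)
	{
		switch (cmd) {
		case SIOCGIFNAME:	/* any pointer-taking command */
			return example_ioctl(tty, file, cmd,
					     (unsigned long)compat_ptr(arg));
		}
		return -ENOIOCTLCMD;	/* unhandled: let the core decide */
	}
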
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index c791ef76c1d6..a93f122e9a96 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -268,9 +268,9 @@ static int __init ultramca_probe(struct device *gen_dev)
 		}
 	}
 
-	if(!tirq || !tbase
-	   || (irq && irq != tirq)
-	   || (base_addr && tbase != base_addr))
+	if(!tirq || !tbase ||
+	   (irq && irq != tirq) ||
+	   (base_addr && tbase != base_addr))
 		/* FIXME: we're trying to force the ordering of the
 		 * devices here; there should be a way of getting this
 		 * to happen */
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 2a6b6de95339..44ebbaa7457b 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1984,7 +1984,7 @@ static int __devinit smc911x_probe(struct net_device *dev)
 #endif
 
 	/* Grab the IRQ */
-	retval = request_irq(dev->irq, &smc911x_interrupt,
+	retval = request_irq(dev->irq, smc911x_interrupt,
 			     irq_flags, dev->name, dev);
 	if (retval)
 		goto err_out;
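
Dropping the '&' in front of the handler, here and in the request_irq() calls in the drivers below, is purely cosmetic: a function name already decays to a function pointer, so both spellings pass the same irq_handler_t value. Illustration only (my_handler and dev are hypothetical):

	static irqreturn_t my_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	/* these two calls are identical */
	request_irq(irq, my_handler, IRQF_SHARED, "example", dev);
	request_irq(irq, &my_handler, IRQF_SHARED, "example", dev);
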
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 934a12012829..8371b82323ac 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1050,7 +1050,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
 	memset(netdev_priv(dev), 0, sizeof(struct smc_local));
 
 	/* Grab the IRQ */
-      	retval = request_irq(dev->irq, &smc_interrupt, 0, DRV_NAME, dev);
+      	retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
       	if (retval) {
 		printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
 			dev->irq, retval);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f12206bdbb75..ae4983a5127d 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2031,7 +2031,7 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
 	}
 
 	/* Grab the IRQ */
-	retval = request_irq(dev->irq, &smc_interrupt, irq_flags, dev->name, dev);
+	retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
       	if (retval)
       		goto err_out;
 
@@ -2365,9 +2365,10 @@ static int __devexit smc_drv_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int smc_drv_suspend(struct platform_device *dev, pm_message_t state)
+static int smc_drv_suspend(struct device *dev)
 {
-	struct net_device *ndev = platform_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 
 	if (ndev) {
 		if (netif_running(ndev)) {
@@ -2379,9 +2380,10 @@ static int smc_drv_suspend(struct platform_device *dev, pm_message_t state)
 	return 0;
 }
 
-static int smc_drv_resume(struct platform_device *dev)
+static int smc_drv_resume(struct device *dev)
 {
-	struct net_device *ndev = platform_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 
 	if (ndev) {
 		struct smc_local *lp = netdev_priv(ndev);
@@ -2397,14 +2399,18 @@ static int smc_drv_resume(struct platform_device *dev)
 	return 0;
 }
 
+static struct dev_pm_ops smc_drv_pm_ops = {
+	.suspend	= smc_drv_suspend,
+	.resume		= smc_drv_resume,
+};
+
 static struct platform_driver smc_driver = {
 	.probe		= smc_drv_probe,
 	.remove		= __devexit_p(smc_drv_remove),
-	.suspend	= smc_drv_suspend,
-	.resume		= smc_drv_resume,
 	.driver		= {
 		.name	= CARDNAME,
 		.owner	= THIS_MODULE,
+		.pm	= &smc_drv_pm_ops,
 	},
 };
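
With dev_pm_ops the suspend/resume callbacks receive a struct device and no pm_message_t, which is why the driver now recovers its net_device via to_platform_device(). A minimal sketch of the same conversion for some other hypothetical platform driver (foo_* names are illustrative); dev_get_drvdata() reaches the data stored with platform_set_drvdata() without the detour through the platform_device:

	static int foo_suspend(struct device *dev)
	{
		struct net_device *ndev = dev_get_drvdata(dev);

		if (ndev && netif_running(ndev))
			netif_device_detach(ndev);
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend = foo_suspend,
		/* .resume is written the same way */
	};

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
	};
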
 
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 3911be7c0cba..7815bfc300f5 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -158,8 +158,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, (l))
 #define SMC_IRQ_FLAGS		(-1)	/* from resource */
 
-#elif	defined(CONFIG_MACH_LOGICPD_PXA270) \
-	|| defined(CONFIG_MACH_NOMADIK_8815NHK)
+#elif	defined(CONFIG_MACH_LOGICPD_PXA270) ||	\
+	defined(CONFIG_MACH_NOMADIK_8815NHK)
 
 #define SMC_CAN_USE_8BIT	0
 #define SMC_CAN_USE_16BIT	1
@@ -258,9 +258,9 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define RPC_LSA_DEFAULT		RPC_LED_TX_RX
 #define RPC_LSB_DEFAULT		RPC_LED_100_10
 
-#elif   defined(CONFIG_MACH_LPD79520) \
-     || defined(CONFIG_MACH_LPD7A400) \
-     || defined(CONFIG_MACH_LPD7A404)
+#elif   defined(CONFIG_MACH_LPD79520) ||	\
+	defined(CONFIG_MACH_LPD7A400) ||	\
+	defined(CONFIG_MACH_LPD7A404)
 
 /* The LPD7X_IOBARRIER is necessary to overcome a mismatch between the
  * way that the CPU handles chip selects and the way that the SMC chip
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index f9cdcbcb77d4..4d0d5c56bed8 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -748,8 +748,8 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
 			 * usage is 10/100 indicator */
 			pdata->gpio_setting = smsc911x_reg_read(pdata,
 				GPIO_CFG);
-			if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_)
-			    && (!pdata->using_extphy)) {
+			if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
+			    (!pdata->using_extphy)) {
 				/* Force 10/100 LED off, after saving
 				 * original GPIO configuration */
 				pdata->gpio_orig_setting = pdata->gpio_setting;
@@ -2071,6 +2071,9 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 	if (is_valid_ether_addr(dev->dev_addr)) {
 		smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
 		SMSC_TRACE(PROBE, "MAC Address is specified by configuration");
+	} else if (is_valid_ether_addr(pdata->config.mac)) {
+		memcpy(dev->dev_addr, pdata->config.mac, 6);
+		SMSC_TRACE(PROBE, "MAC Address specified by platform data");
 	} else {
 		/* Try reading the MAC address from the device; if an EEPROM
 		 * is present it will already have been set */
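
The new branch slots the platform-data address into the existing precedence: an address already set on the net_device wins, then pdata->config.mac, then whatever the device itself provides. is_valid_ether_addr() is the gate at each step; it rejects the all-zero address and anything with the multicast bit set. For example (the addresses are made up):

	u8 zero[ETH_ALEN]  = { 0 };
	u8 mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u8 good[ETH_ALEN]  = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

	is_valid_ether_addr(zero);	/* false: all zeroes */
	is_valid_ether_addr(mcast);	/* false: group bit set */
	is_valid_ether_addr(good);	/* true: non-zero unicast */
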
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 0f7909276237..12f0f5d74e3c 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1175,7 +1175,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
 		phydev->phy_id);
 
 	phydev = phy_connect(dev, dev_name(&phydev->dev),
-		&smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+		smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
 
 	if (IS_ERR(phydev)) {
 		pr_err("%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 90e663f4515c..782910cf220f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -57,6 +57,7 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
 MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(VERSION);
+MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
 
 static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
 static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a36e2b51e88c..95db60adde41 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -928,7 +928,7 @@ static int netdev_open(struct net_device *dev)
 
 	/* Do we ever need to reset the chip??? */
 
-	retval = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -1482,8 +1482,8 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
-		if (pkt_len < rx_copybreak
-		    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		if (pkt_len < rx_copybreak &&
+		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(np->pci_dev,
 						    np->rx_info[entry].mapping,
@@ -1793,8 +1793,8 @@ static void set_rx_mode(struct net_device *dev)
 
 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
 		rx_mode |= AcceptAll;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
 	} else if (dev->mc_count <= 14) {
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 9542995ba667..508fba8fa07f 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -922,8 +922,7 @@ static void stmmac_dma_interrupt(struct net_device *dev)
 		DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
 		if (unlikely(intr_status & DMA_STATUS_UNF)) {
 			DBG(intr, INFO, "transmit underflow\n");
-			if (unlikely(tc != SF_DMA_MODE)
-			    && (tc <= 256)) {
+			if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
 				/* Try to bump up the threshold */
 				tc += 64;
 				priv->mac_type->ops->dma_mode(ioaddr, tc,
@@ -1024,7 +1023,7 @@ static int stmmac_open(struct net_device *dev)
 	}
 
 	/* Request the IRQ lines */
-	ret = request_irq(dev->irq, &stmmac_interrupt,
+	ret = request_irq(dev->irq, stmmac_interrupt,
 			  IRQF_SHARED, dev->name, dev);
 	if (unlikely(ret < 0)) {
 		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 2f1eaaf7a727..b447a8719427 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -191,7 +191,7 @@ static int sun3_82586_open(struct net_device *dev)
 	startrecv586(dev);
 	sun3_enaint();
 
-	ret = request_irq(dev->irq, &sun3_82586_interrupt,0,dev->name,dev);
+	ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev);
 	if (ret)
 	{
 		sun3_reset586();
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 536cf7e06bfd..25e81ebd9cd8 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -919,7 +919,7 @@ static int bigmac_open(struct net_device *dev)
 	struct bigmac *bp = netdev_priv(dev);
 	int ret;
 
-	ret = request_irq(dev->irq, &bigmac_interrupt, IRQF_SHARED, dev->name, bp);
+	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
 	if (ret) {
 		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
 		return ret;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e13685a570f4..d58e1891ca60 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -603,8 +603,8 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 			    strcmp (media[card_idx], "4") == 0) {
 				np->speed = 100;
 				np->mii_if.full_duplex = 1;
-			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
-				   || strcmp (media[card_idx], "3") == 0) {
+			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+				   strcmp (media[card_idx], "3") == 0) {
 				np->speed = 100;
 				np->mii_if.full_duplex = 0;
 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
@@ -819,7 +819,7 @@ static int netdev_open(struct net_device *dev)
 
 	/* Do we need to reset the chip??? */
 
-	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
@@ -1079,8 +1079,8 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 	tasklet_schedule(&np->tx_tasklet);
 
 	/* On some architectures: explicitly flush cache lines here. */
-	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
-			&& !netif_queue_stopped(dev)) {
+	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
+	    !netif_queue_stopped(dev)) {
 		/* do nothing */
 	} else {
 		netif_stop_queue (dev);
@@ -1336,8 +1336,8 @@ static void rx_poll(unsigned long data)
 #endif
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak
-				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			if (pkt_len < rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(np->pci_dev,
 							    le32_to_cpu(desc->frag[0].addr),
@@ -1517,8 +1517,8 @@ static void set_rx_mode(struct net_device *dev)
 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
-	} else if ((dev->mc_count > multicast_filter_limit)
-			   ||  (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 61640b99b705..b571a1babab9 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1034,10 +1034,8 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 			(csum_stuff_off << 21));
 	}
 
-	local_irq_save(flags);
-	if (!spin_trylock(&gp->tx_lock)) {
+	if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
 		/* Tell upper layer to requeue */
-		local_irq_restore(flags);
 		return NETDEV_TX_LOCKED;
 	}
 	/* We raced with gem_do_stop() */
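
spin_trylock_irqsave() bundles the interrupt save/restore with the trylock, which is why the explicit local_irq_save()/local_irq_restore() pair disappears: on failure it restores the saved flags itself and evaluates to 0. Its definition is roughly (paraphrasing include/linux/spinlock.h):

	#define spin_trylock_irqsave(lock, flags)			\
	({								\
		local_irq_save(flags);					\
		spin_trylock(lock) ?					\
		1 : ({ local_irq_restore(flags); 0; });			\
	})
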
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index f7a02917ce5e..19905460def6 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1031,8 +1031,8 @@ struct gem {
 #endif
 };
 
-#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) \
-				&& gp->phy_mii.def && gp->phy_mii.def->ops)
+#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \
+			   gp->phy_mii.def && gp->phy_mii.def->ops)
 
 #define ALIGNED_RX_SKB_ADDR(addr) \
         ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 37d721bbdb35..6762f1c6ec8a 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1226,10 +1226,16 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
 			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
 				txd = &hp->happy_block->happy_meal_txd[i];
 				dma_addr = hme_read_desc32(hp, &txd->tx_addr);
-				dma_unmap_single(hp->dma_dev, dma_addr,
-						 (hme_read_desc32(hp, &txd->tx_flags)
-						  & TXFLAG_SIZE),
-						 DMA_TO_DEVICE);
+				if (!frag)
+					dma_unmap_single(hp->dma_dev, dma_addr,
+							 (hme_read_desc32(hp, &txd->tx_flags)
+							  & TXFLAG_SIZE),
+							 DMA_TO_DEVICE);
+				else
+					dma_unmap_page(hp->dma_dev, dma_addr,
+							 (hme_read_desc32(hp, &txd->tx_flags)
+							  & TXFLAG_SIZE),
+							 DMA_TO_DEVICE);
 
 				if (frag != skb_shinfo(skb)->nr_frags)
 					i++;
@@ -1953,7 +1959,10 @@ static void happy_meal_tx(struct happy_meal *hp)
 			dma_len = hme_read_desc32(hp, &this->tx_flags);
 
 			dma_len &= TXFLAG_SIZE;
-			dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
+			if (!frag)
+				dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
+			else
+				dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
 
 			elem = NEXT_TX(elem);
 			this = &txbase[elem];
@@ -2184,7 +2193,7 @@ static int happy_meal_open(struct net_device *dev)
 	 * into a single source which we register handling at probe time.
 	 */
 	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
-		if (request_irq(dev->irq, &happy_meal_interrupt,
+		if (request_irq(dev->irq, happy_meal_interrupt,
 				IRQF_SHARED, dev->name, (void *)dev)) {
 			HMD(("EAGAIN\n"));
 			printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
@@ -3047,9 +3056,9 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
 		int len;
 
 		if (qfe_slot != -1 &&
-		    (addr = of_get_property(dp,
-					    "local-mac-address", &len)) != NULL
-		    && len == 6) {
+		    (addr = of_get_property(dp, "local-mac-address", &len))
+			!= NULL &&
+		    len == 6) {
 			memcpy(dev->dev_addr, addr, 6);
 		} else {
 			memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
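
The transmit path maps the linear part of the skb with dma_map_single() and each paged fragment with dma_map_page(), so the teardown has to use the matching unmap call; CONFIG_DMA_API_DEBUG warns when the two are mixed. A fragment-level sketch of the mapping side these unmaps now mirror (not sunhme's literal code):

	dma_addr_t mapping;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/* skb head: a kernel virtual address */
	mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);

	/* paged fragment: struct page plus offset */
	mapping = dma_map_page(dev, frag->page, frag->page_offset,
			       frag->size, DMA_TO_DEVICE);
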
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 9d6fd4760eab..64e7d08c878f 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -923,7 +923,7 @@ static int lance_open(struct net_device *dev)
 
 	STOP_LANCE(lp);
 
-	if (request_irq(dev->irq, &lance_interrupt, IRQF_SHARED,
+	if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
 			lancestr, (void *) dev)) {
 		printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
 		return -EAGAIN;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index dcefb608a9f4..45c383f285ee 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -807,7 +807,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
 
 			qec_init_once(qecp, op);
 
-			if (request_irq(op->irqs[0], &qec_interrupt,
+			if (request_irq(op->irqs[0], qec_interrupt,
 					IRQF_SHARED, "qec", (void *) qecp)) {
 				printk(KERN_ERR "qec: Can't register irq.\n");
 				goto fail;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d1298e5b72c5..75a669d48e5e 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -22,11 +22,7 @@
  * All Rights Reserved.
  */
 
-#ifdef TC35815_NAPI
-#define DRV_VERSION	"1.38-NAPI"
-#else
-#define DRV_VERSION	"1.38"
-#endif
+#define DRV_VERSION	"1.39"
 static const char *version = "tc35815.c:v" DRV_VERSION "\n";
 #define MODNAME			"tc35815"
 
@@ -54,13 +50,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
 #include <asm/io.h>
 #include <asm/byteorder.h>
 
-/* First, a few definitions that the brave might change. */
-
-#define GATHER_TXINT	/* On-Demand Tx Interrupt */
-#define WORKAROUND_LOSTCAR
-#define WORKAROUND_100HALF_PROMISC
-/* #define TC35815_USE_PACKEDBUFFER */
-
 enum tc35815_chiptype {
 	TC35815CF = 0,
 	TC35815_NWU,
@@ -330,17 +319,10 @@ struct BDesc {
 
 
 /* Some useful constants. */
-#undef NO_CHECK_CARRIER	/* Does not check No-Carrier with TP */
 
-#ifdef NO_CHECK_CARRIER
-#define TX_CTL_CMD	(Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
-	Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
-	Tx_En)	/* maybe  0x7b01 */
-#else
-#define TX_CTL_CMD	(Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
+#define TX_CTL_CMD	(Tx_EnTxPar | Tx_EnLateColl | \
 	Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
 	Tx_En)	/* maybe  0x7b01 */
-#endif
 /* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
 #define RX_CTL_CMD	(Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
 	| Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
@@ -361,13 +343,6 @@ struct BDesc {
 #define TX_THRESHOLD_KEEP_LIMIT 10
 
 /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
-#ifdef TC35815_USE_PACKEDBUFFER
-#define FD_PAGE_NUM 2
-#define RX_BUF_NUM	8	/* >= 2 */
-#define RX_FD_NUM	250	/* >= 32 */
-#define TX_FD_NUM	128
-#define RX_BUF_SIZE	PAGE_SIZE
-#else /* TC35815_USE_PACKEDBUFFER */
 #define FD_PAGE_NUM 4
 #define RX_BUF_NUM	128	/* < 256 */
 #define RX_FD_NUM	256	/* >= 32 */
@@ -381,7 +356,6 @@ struct BDesc {
 #define RX_BUF_SIZE	\
 	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
 #endif
-#endif /* TC35815_USE_PACKEDBUFFER */
 #define RX_FD_RESERVE	(2 / 2)	/* max 2 BD per RxFD */
 #define NAPI_WEIGHT	16
 
@@ -439,11 +413,7 @@ struct tc35815_local {
 	/*
 	 * Transmitting: Batch Mode.
 	 *	1 BD in 1 TxFD.
-	 * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER)
-	 *	1 circular FD for Free Buffer List.
-	 *	RX_BUF_NUM BD in Free Buffer FD.
-	 *	One Free Buffer BD has PAGE_SIZE data buffer.
-	 * Or Non-Packing Mode.
+	 * Receiving: Non-Packing Mode.
 	 *	1 circular FD for Free Buffer List.
 	 *	RX_BUF_NUM BD in Free Buffer FD.
 	 *	One Free Buffer BD has ETH_FRAME_LEN data buffer.
@@ -457,21 +427,11 @@ struct tc35815_local {
 	struct RxFD *rfd_limit;
 	struct RxFD *rfd_cur;
 	struct FrFD *fbl_ptr;
-#ifdef TC35815_USE_PACKEDBUFFER
-	unsigned char fbl_curid;
-	void *data_buf[RX_BUF_NUM];		/* packing */
-	dma_addr_t data_buf_dma[RX_BUF_NUM];
-	struct {
-		struct sk_buff *skb;
-		dma_addr_t skb_dma;
-	} tx_skbs[TX_FD_NUM];
-#else
 	unsigned int fbl_count;
 	struct {
 		struct sk_buff *skb;
 		dma_addr_t skb_dma;
 	} tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
-#endif
 	u32 msg_enable;
 	enum tc35815_chiptype chiptype;
 };
@@ -486,51 +446,6 @@ static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
 	return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
 }
 #endif
-#ifdef TC35815_USE_PACKEDBUFFER
-static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
-{
-	int i;
-	for (i = 0; i < RX_BUF_NUM; i++) {
-		if (bus >= lp->data_buf_dma[i] &&
-		    bus < lp->data_buf_dma[i] + PAGE_SIZE)
-			return (void *)((u8 *)lp->data_buf[i] +
-					(bus - lp->data_buf_dma[i]));
-	}
-	return NULL;
-}
-
-#define TC35815_DMA_SYNC_ONDEMAND
-static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
-{
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-	void *buf;
-	/* pci_map + pci_dma_sync will be more effective than
-	 * pci_alloc_consistent on some archs. */
-	buf = (void *)__get_free_page(GFP_ATOMIC);
-	if (!buf)
-		return NULL;
-	*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
-				     PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(hwdev, *dma_handle)) {
-		free_page((unsigned long)buf);
-		return NULL;
-	}
-	return buf;
-#else
-	return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
-#endif
-}
-
-static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
-{
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-	pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-	free_page((unsigned long)buf);
-#else
-	pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
-#endif
-}
-#else /* TC35815_USE_PACKEDBUFFER */
 static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
 				       struct pci_dev *hwdev,
 				       dma_addr_t *dma_handle)
@@ -555,19 +470,14 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
 			 PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(skb);
 }
-#endif /* TC35815_USE_PACKEDBUFFER */
 
 /* Index to functions, as function prototypes. */
 
 static int	tc35815_open(struct net_device *dev);
 static int	tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t	tc35815_interrupt(int irq, void *dev_id);
-#ifdef TC35815_NAPI
 static int	tc35815_rx(struct net_device *dev, int limit);
 static int	tc35815_poll(struct napi_struct *napi, int budget);
-#else
-static void	tc35815_rx(struct net_device *dev);
-#endif
 static void	tc35815_txdone(struct net_device *dev);
 static int	tc35815_close(struct net_device *dev);
 static struct	net_device_stats *tc35815_get_stats(struct net_device *dev);
@@ -654,8 +564,6 @@ static void tc_handle_link_change(struct net_device *dev)
 		 * TX4939 PCFG.SPEEDn bit will be changed on
 		 * NETDEV_CHANGE event.
 		 */
-
-#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
 		/*
 		 * WORKAROUND: enable LostCrS only if half duplex
 		 * operation.
@@ -665,7 +573,6 @@ static void tc_handle_link_change(struct net_device *dev)
 		    lp->chiptype != TC35815_TX4939)
 			tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
 				  &tr->Tx_Ctl);
-#endif
 
 		lp->speed = phydev->speed;
 		lp->duplex = phydev->duplex;
@@ -674,11 +581,9 @@ static void tc_handle_link_change(struct net_device *dev)
 
 	if (phydev->link != lp->link) {
 		if (phydev->link) {
-#ifdef WORKAROUND_100HALF_PROMISC
 			/* delayed promiscuous enabling */
 			if (dev->flags & IFF_PROMISC)
 				tc35815_set_multicast_list(dev);
-#endif
 		} else {
 			lp->speed = 0;
 			lp->duplex = -1;
@@ -923,9 +828,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
 	dev->netdev_ops = &tc35815_netdev_ops;
 	dev->ethtool_ops = &tc35815_ethtool_ops;
 	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
-#ifdef TC35815_NAPI
 	netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
-#endif
 
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long)ioaddr;
@@ -1007,25 +910,6 @@ tc35815_init_queues(struct net_device *dev)
 		if (!lp->fd_buf)
 			return -ENOMEM;
 		for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-			lp->data_buf[i] =
-				alloc_rxbuf_page(lp->pci_dev,
-						 &lp->data_buf_dma[i]);
-			if (!lp->data_buf[i]) {
-				while (--i >= 0) {
-					free_rxbuf_page(lp->pci_dev,
-							lp->data_buf[i],
-							lp->data_buf_dma[i]);
-					lp->data_buf[i] = NULL;
-				}
-				pci_free_consistent(lp->pci_dev,
-						    PAGE_SIZE * FD_PAGE_NUM,
-						    lp->fd_buf,
-						    lp->fd_buf_dma);
-				lp->fd_buf = NULL;
-				return -ENOMEM;
-			}
-#else
 			lp->rx_skbs[i].skb =
 				alloc_rxbuf_skb(dev, lp->pci_dev,
 						&lp->rx_skbs[i].skb_dma);
@@ -1043,15 +927,9 @@ tc35815_init_queues(struct net_device *dev)
 				lp->fd_buf = NULL;
 				return -ENOMEM;
 			}
-#endif
 		}
 		printk(KERN_DEBUG "%s: FD buf %p DataBuf",
 		       dev->name, lp->fd_buf);
-#ifdef TC35815_USE_PACKEDBUFFER
-		printk(" DataBuf");
-		for (i = 0; i < RX_BUF_NUM; i++)
-			printk(" %p", lp->data_buf[i]);
-#endif
 		printk("\n");
 	} else {
 		for (i = 0; i < FD_PAGE_NUM; i++)
@@ -1084,7 +962,6 @@ tc35815_init_queues(struct net_device *dev)
 	lp->fbl_ptr = (struct FrFD *)fd_addr;
 	lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
 	lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
-#ifndef TC35815_USE_PACKEDBUFFER
 	/*
 	 * move all allocated skbs to head of rx_skbs[] array.
 	 * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
@@ -1102,11 +979,7 @@ tc35815_init_queues(struct net_device *dev)
 			lp->fbl_count++;
 		}
 	}
-#endif
 	for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-		lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
-#else
 		if (i >= lp->fbl_count) {
 			lp->fbl_ptr->bd[i].BuffData = 0;
 			lp->fbl_ptr->bd[i].BDCtl = 0;
@@ -1114,15 +987,11 @@ tc35815_init_queues(struct net_device *dev)
 		}
 		lp->fbl_ptr->bd[i].BuffData =
 			cpu_to_le32(lp->rx_skbs[i].skb_dma);
-#endif
 		/* BDID is index of FrFD.bd[] */
 		lp->fbl_ptr->bd[i].BDCtl =
 			cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
 				    RX_BUF_SIZE);
 	}
-#ifdef TC35815_USE_PACKEDBUFFER
-	lp->fbl_curid = 0;
-#endif
 
 	printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
 	       dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
@@ -1196,19 +1065,11 @@ tc35815_free_queues(struct net_device *dev)
 	lp->fbl_ptr = NULL;
 
 	for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-		if (lp->data_buf[i]) {
-			free_rxbuf_page(lp->pci_dev,
-					lp->data_buf[i], lp->data_buf_dma[i]);
-			lp->data_buf[i] = NULL;
-		}
-#else
 		if (lp->rx_skbs[i].skb) {
 			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
 				       lp->rx_skbs[i].skb_dma);
 			lp->rx_skbs[i].skb = NULL;
 		}
-#endif
 	}
 	if (lp->fd_buf) {
 		pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
@@ -1254,7 +1115,7 @@ dump_rxfd(struct RxFD *fd)
 	return bd_count;
 }
 
-#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER)
+#ifdef DEBUG
 static void
 dump_frfd(struct FrFD *fd)
 {
@@ -1271,9 +1132,7 @@ dump_frfd(struct FrFD *fd)
 		       le32_to_cpu(fd->bd[i].BDCtl));
 	printk("\n");
 }
-#endif
 
-#ifdef DEBUG
 static void
 panic_queues(struct net_device *dev)
 {
@@ -1389,7 +1248,7 @@ tc35815_open(struct net_device *dev)
 	 * This is used if the interrupt line can be turned off (shared).
 	 * See 3c503.c for an example of selecting the IRQ at config-time.
 	 */
-	if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED,
+	if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
 			dev->name, dev))
 		return -EAGAIN;
 
@@ -1400,9 +1259,7 @@ tc35815_open(struct net_device *dev)
 		return -EAGAIN;
 	}
 
-#ifdef TC35815_NAPI
 	napi_enable(&lp->napi);
-#endif
 
 	/* Reset the hardware here. Don't forget to set the station address. */
 	spin_lock_irq(&lp->lock);
@@ -1478,9 +1335,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
 			(struct tc35815_regs __iomem *)dev->base_addr;
 		/* Start DMA Transmitter. */
 		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
-#ifdef GATHER_TXINT
 		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
-#endif
 		if (netif_msg_tx_queued(lp)) {
 			printk("%s: starting TxFD.\n", dev->name);
 			dump_txfd(txfd);
@@ -1536,11 +1391,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
 	tc35815_schedule_restart(dev);
 }
 
-#ifdef TC35815_NAPI
 static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
-#else
-static int tc35815_do_interrupt(struct net_device *dev, u32 status)
-#endif
 {
 	struct tc35815_local *lp = netdev_priv(dev);
 	int ret = -1;
@@ -1579,12 +1430,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
 	/* normal notification */
 	if (status & Int_IntMacRx) {
 		/* Got a packet(s). */
-#ifdef TC35815_NAPI
 		ret = tc35815_rx(dev, limit);
-#else
-		tc35815_rx(dev);
-		ret = 0;
-#endif
 		lp->lstats.rx_ints++;
 	}
 	if (status & Int_IntMacTx) {
@@ -1592,7 +1438,8 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
 		lp->lstats.tx_ints++;
 		tc35815_txdone(dev);
 		netif_wake_queue(dev);
-		ret = 0;
+		if (ret < 0)
+			ret = 0;
 	}
 	return ret;
 }
@@ -1607,7 +1454,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
 	struct tc35815_local *lp = netdev_priv(dev);
 	struct tc35815_regs __iomem *tr =
 		(struct tc35815_regs __iomem *)dev->base_addr;
-#ifdef TC35815_NAPI
 	u32 dmactl = tc_readl(&tr->DMA_Ctl);
 
 	if (!(dmactl & DMA_IntMask)) {
@@ -1624,22 +1470,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
-#else
-	int handled;
-	u32 status;
-
-	spin_lock(&lp->lock);
-	status = tc_readl(&tr->Int_Src);
-	/* BLEx, FDAEx will be cleared later */
-	tc_writel(status & ~(Int_BLEx | Int_FDAEx),
-		  &tr->Int_Src);	/* write to clear */
-	handled = tc35815_do_interrupt(dev, status);
-	if (status & (Int_BLEx | Int_FDAEx))
-		tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src);
-	(void)tc_readl(&tr->Int_Src);	/* flush */
-	spin_unlock(&lp->lock);
-	return IRQ_RETVAL(handled >= 0);
-#endif /* TC35815_NAPI */
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1652,20 +1482,13 @@ static void tc35815_poll_controller(struct net_device *dev)
 #endif
 
 /* We have a good packet(s), get it/them out of the buffers. */
-#ifdef TC35815_NAPI
 static int
 tc35815_rx(struct net_device *dev, int limit)
-#else
-static void
-tc35815_rx(struct net_device *dev)
-#endif
 {
 	struct tc35815_local *lp = netdev_priv(dev);
 	unsigned int fdctl;
 	int i;
-#ifdef TC35815_NAPI
 	int received = 0;
-#endif
 
 	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
 		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
@@ -1684,52 +1507,9 @@ tc35815_rx(struct net_device *dev)
 			struct sk_buff *skb;
 			unsigned char *data;
 			int cur_bd;
-#ifdef TC35815_USE_PACKEDBUFFER
-			int offset;
-#endif
 
-#ifdef TC35815_NAPI
 			if (--limit < 0)
 				break;
-#endif
-#ifdef TC35815_USE_PACKEDBUFFER
-			BUG_ON(bd_count > 2);
-			skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
-			if (skb == NULL) {
-				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-				       dev->name);
-				dev->stats.rx_dropped++;
-				break;
-			}
-			skb_reserve(skb, NET_IP_ALIGN);
-
-			data = skb_put(skb, pkt_len);
-
-			/* copy from receive buffer */
-			cur_bd = 0;
-			offset = 0;
-			while (offset < pkt_len && cur_bd < bd_count) {
-				int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
-					BD_BuffLength_MASK;
-				dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
-				void *rxbuf = rxbuf_bus_to_virt(lp, dma);
-				if (offset + len > pkt_len)
-					len = pkt_len - offset;
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-				pci_dma_sync_single_for_cpu(lp->pci_dev,
-							    dma, len,
-							    PCI_DMA_FROMDEVICE);
-#endif
-				memcpy(data + offset, rxbuf, len);
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-				pci_dma_sync_single_for_device(lp->pci_dev,
-							       dma, len,
-							       PCI_DMA_FROMDEVICE);
-#endif
-				offset += len;
-				cur_bd++;
-			}
-#else /* TC35815_USE_PACKEDBUFFER */
 			BUG_ON(bd_count > 1);
 			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
 				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
@@ -1757,16 +1537,11 @@ tc35815_rx(struct net_device *dev)
 				memmove(skb->data, skb->data - NET_IP_ALIGN,
 					pkt_len);
 			data = skb_put(skb, pkt_len);
-#endif /* TC35815_USE_PACKEDBUFFER */
 			if (netif_msg_pktdata(lp))
 				print_eth(data);
 			skb->protocol = eth_type_trans(skb, dev);
-#ifdef TC35815_NAPI
 			netif_receive_skb(skb);
 			received++;
-#else
-			netif_rx(skb);
-#endif
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += pkt_len;
 		} else {
@@ -1803,19 +1578,11 @@ tc35815_rx(struct net_device *dev)
 			BUG_ON(id >= RX_BUF_NUM);
 #endif
 			/* free old buffers */
-#ifdef TC35815_USE_PACKEDBUFFER
-			while (lp->fbl_curid != id)
-#else
 			lp->fbl_count--;
 			while (lp->fbl_count < RX_BUF_NUM)
-#endif
 			{
-#ifdef TC35815_USE_PACKEDBUFFER
-				unsigned char curid = lp->fbl_curid;
-#else
 				unsigned char curid =
 					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
-#endif
 				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
 #ifdef DEBUG
 				bdctl = le32_to_cpu(bd->BDCtl);
@@ -1826,7 +1593,6 @@ tc35815_rx(struct net_device *dev)
 				}
 #endif
 				/* pass BD to controller */
-#ifndef TC35815_USE_PACKEDBUFFER
 				if (!lp->rx_skbs[curid].skb) {
 					lp->rx_skbs[curid].skb =
 						alloc_rxbuf_skb(dev,
@@ -1836,21 +1602,11 @@ tc35815_rx(struct net_device *dev)
 						break; /* try on next reception */
 					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
 				}
-#endif /* TC35815_USE_PACKEDBUFFER */
 				/* Note: BDLength was modified by chip. */
 				bd->BDCtl = cpu_to_le32(BD_CownsBD |
 							(curid << BD_RxBDID_SHIFT) |
 							RX_BUF_SIZE);
-#ifdef TC35815_USE_PACKEDBUFFER
-				lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
-				if (netif_msg_rx_status(lp)) {
-					printk("%s: Entering new FBD %d\n",
-					       dev->name, lp->fbl_curid);
-					dump_frfd(lp->fbl_ptr);
-				}
-#else
 				lp->fbl_count++;
-#endif
 			}
 		}
 
@@ -1882,12 +1638,9 @@ tc35815_rx(struct net_device *dev)
 #endif
 	}
 
-#ifdef TC35815_NAPI
 	return received;
-#endif
 }
 
-#ifdef TC35815_NAPI
 static int tc35815_poll(struct napi_struct *napi, int budget)
 {
 	struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
@@ -1924,13 +1677,8 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
 	}
 	return received;
 }
-#endif
 
-#ifdef NO_CHECK_CARRIER
-#define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
-#else
 #define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
-#endif
 
 static void
 tc35815_check_tx_stat(struct net_device *dev, int status)
@@ -1944,16 +1692,12 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
 	if (status & Tx_TxColl_MASK)
 		dev->stats.collisions += status & Tx_TxColl_MASK;
 
-#ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have NCarr */
 	if (lp->chiptype == TC35815_TX4939)
 		status &= ~Tx_NCarr;
-#ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
 	if (!lp->link || lp->duplex == DUPLEX_FULL)
 		status &= ~Tx_NCarr;
-#endif
-#endif
 
 	if (!(status & TX_STA_ERR)) {
 		/* no error. */
@@ -1983,12 +1727,10 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
 		dev->stats.tx_fifo_errors++;
 		msg = "Excessive Deferral.";
 	}
-#ifndef NO_CHECK_CARRIER
 	if (status & Tx_NCarr) {
 		dev->stats.tx_carrier_errors++;
 		msg = "Lost Carrier Sense.";
 	}
-#endif
 	if (status & Tx_LateColl) {
 		dev->stats.tx_aborted_errors++;
 		msg = "Late Collision.";
@@ -2044,11 +1786,7 @@ tc35815_txdone(struct net_device *dev)
 			pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
 			lp->tx_skbs[lp->tfd_end].skb = NULL;
 			lp->tx_skbs[lp->tfd_end].skb_dma = 0;
-#ifdef TC35815_NAPI
 			dev_kfree_skb_any(skb);
-#else
-			dev_kfree_skb_irq(skb);
-#endif
 		}
 		txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
 
@@ -2083,9 +1821,7 @@ tc35815_txdone(struct net_device *dev)
 
 				/* start DMA Transmitter again */
 				txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
-#ifdef GATHER_TXINT
 				txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
-#endif
 				if (netif_msg_tx_queued(lp)) {
 					printk("%s: start TxFD on queue.\n",
 					       dev->name);
@@ -2112,9 +1848,7 @@ tc35815_close(struct net_device *dev)
 	struct tc35815_local *lp = netdev_priv(dev);
 
 	netif_stop_queue(dev);
-#ifdef TC35815_NAPI
 	napi_disable(&lp->napi);
-#endif
 	if (lp->phy_dev)
 		phy_stop(lp->phy_dev);
 	cancel_work_sync(&lp->restart_work);
@@ -2198,14 +1932,12 @@ tc35815_set_multicast_list(struct net_device *dev)
 		(struct tc35815_regs __iomem *)dev->base_addr;
 
 	if (dev->flags & IFF_PROMISC) {
-#ifdef WORKAROUND_100HALF_PROMISC
 		/* With some (all?) 100MHalf hubs, the controller will hang
 		 * if we enable promiscuous mode before link-up... */
 		struct tc35815_local *lp = netdev_priv(dev);
 
 		if (!lp->link)
 			return;
-#endif
 		/* Enable promiscuous mode */
 		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
 	} else if ((dev->flags & IFF_ALLMULTI) ||
@@ -2392,9 +2124,6 @@ static void tc35815_chip_init(struct net_device *dev)
 		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
 	else
 		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
-#ifdef TC35815_USE_PACKEDBUFFER
-	tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize);	/* Packing */
-#endif
 	tc_writel(0, &tr->TxPollCtr);	/* Batch mode */
 	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
 	tc_writel(INT_EN_CMD, &tr->Int_En);
@@ -2412,19 +2141,12 @@ static void tc35815_chip_init(struct net_device *dev)
 	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);	/* start MAC receiver */
 
 	/* start MAC transmitter */
-#ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have EnLCarr */
 	if (lp->chiptype == TC35815_TX4939)
 		txctl &= ~Tx_EnLCarr;
-#ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
 	if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
 		txctl &= ~Tx_EnLCarr;
-#endif
-#endif /* !NO_CHECK_CARRIER */
-#ifdef GATHER_TXINT
-	txctl &= ~Tx_EnComp;	/* disable global tx completion int. */
-#endif
 	tc_writel(txctl, &tr->Tx_Ctl);
 }
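
With the non-NAPI and packed-buffer variants removed, tc35815 keeps only the NAPI receive path, which follows the standard contract: the interrupt handler masks the device interrupt and schedules the poller, and the poller handles at most budget packets, calling napi_complete() and unmasking only once it runs dry. The generic shape, with foo_* standing in for the driver's own helpers:

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		foo_mask_irqs(priv);			/* hypothetical */
		napi_schedule(&priv->napi);
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int done = foo_rx(priv, budget);	/* hypothetical */

		if (done < budget) {
			napi_complete(napi);
			foo_unmask_irqs(priv);		/* hypothetical */
		}
		return done;
	}
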
 
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index ec9dfb251f30..80b404f2b938 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -420,7 +420,7 @@ static int bdx_hw_start(struct bdx_priv *priv)
 		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
 
 #define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
-	if ((rc = request_irq(priv->pdev->irq, &bdx_isr_napi, BDX_IRQ_TYPE,
+	if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
 			 ndev->name, ndev)))
 		goto err_irq;
 	bdx_enable_interrupts(priv);
@@ -1784,9 +1784,9 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
 	}
 #endif
 
-	if (unlikely(netif_queue_stopped(priv->ndev)
-		     && netif_carrier_ok(priv->ndev)
-		     && (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
+	if (unlikely(netif_queue_stopped(priv->ndev) &&
+		     netif_carrier_ok(priv->ndev) &&
+		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
 		DBG("%s: %s: TX Q WAKE level %d\n",
 		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
 		netif_wake_queue(priv->ndev);
@@ -1878,7 +1878,7 @@ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
 			udelay(50);	/* give hw a chance to clean fifo */
 			continue;
 		}
-		avail = MIN(avail, size);
+		avail = min(avail, size);
 		DBG("about to push  %d bytes starting %p size %d\n", avail,
 		    data, size);
 		bdx_tx_push_desc(priv, data, avail);
@@ -2105,12 +2105,6 @@ err_pci:
 }
 
 /****************** Ethtool interface *********************/
-/* get strings for tests */
-static const char
- bdx_test_names[][ETH_GSTRING_LEN] = {
-	"No tests defined"
-};
-
 /* get strings for statistics counters */
 static const char
  bdx_stat_names[][ETH_GSTRING_LEN] = {
@@ -2279,8 +2273,8 @@ bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
 	    (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
 	     / PCK_TH_MULT);
 
-	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF)
-	    || (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
+	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
+	    (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
 		return -EINVAL;
 
 	rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
@@ -2353,8 +2347,8 @@ bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		tx_size = 3;
 
 	/*Is there anything to do? */
-	if ((rx_size == priv->rxf_size)
-	    && (tx_size == priv->txd_size))
+	if ((rx_size == priv->rxf_size) &&
+	    (tx_size == priv->txd_size))
 		return 0;
 
 	priv->rxf_size = rx_size;
@@ -2380,9 +2374,6 @@ bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 	switch (stringset) {
-	case ETH_SS_TEST:
-		memcpy(data, *bdx_test_names, sizeof(bdx_test_names));
-		break;
 	case ETH_SS_STATS:
 		memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
 		break;
@@ -2390,15 +2381,21 @@ static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 }
 
 /*
- * bdx_get_stats_count - return number of 64bit statistics counters
+ * bdx_get_sset_count - return number of statistics or tests
  * @netdev
  */
-static int bdx_get_stats_count(struct net_device *netdev)
+static int bdx_get_sset_count(struct net_device *netdev, int stringset)
 {
 	struct bdx_priv *priv = netdev_priv(netdev);
-	BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
-		   != sizeof(struct bdx_stats) / sizeof(u64));
-	return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names)	: 0);
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
+			   != sizeof(struct bdx_stats) / sizeof(u64));
+		return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names)	: 0);
+	default:
+		return -EINVAL;
+	}
 }
 
 /*
@@ -2441,7 +2438,7 @@ static void bdx_ethtool_ops(struct net_device *netdev)
 		.get_sg = ethtool_op_get_sg,
 		.get_tso = ethtool_op_get_tso,
 		.get_strings = bdx_get_strings,
-		.get_stats_count = bdx_get_stats_count,
+		.get_sset_count = bdx_get_sset_count,
 		.get_ethtool_stats = bdx_get_ethtool_stats,
 	};
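
get_sset_count() folds the older per-type callbacks (get_stats_count and friends) into one entry point keyed by string set, and returning a negative errno for sets the driver does not export is the conventional way to say so, which is why the empty ETH_SS_TEST set can simply be dropped instead of reporting a placeholder string. The usual pattern, sketched with hypothetical foo_* names:

	static int foo_get_sset_count(struct net_device *dev, int sset)
	{
		switch (sset) {
		case ETH_SS_STATS:
			return ARRAY_SIZE(foo_stat_names);
		default:
			return -EOPNOTSUPP;	/* set not exported */
		}
	}
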
 
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 4fc875e5dcdd..124141909e42 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -76,8 +76,6 @@
 #define FIFO_SIZE  4096
 #define FIFO_EXTRA_SPACE            1024
 
-#define MIN(x, y)  ((x) < (y) ? (x) : (y))
-
 #if BITS_PER_LONG == 64
 #    define H32_64(x)  (u32) ((u64)(x) >> 32)
 #    define L32_64(x)  (u32) ((u64)(x) & 0xffffffff)
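
Replacing the private MIN() macro with the kernel's min() also buys type checking: min() warns at compile time when its two operands have different types, and min_t() is the escape hatch when an explicit common type is wanted. Illustrative values:

	int avail = 100, size = 64;
	u32 room  = 32;

	avail = min(avail, size);		/* fine: both int */
	/* min(avail, room) would trigger a compile-time warning */
	avail = min_t(int, avail, room);	/* pick the common type explicitly */
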
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b6..3a74d2168598 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.102"
-#define DRV_MODULE_RELDATE	"September 1, 2009"
+#define DRV_MODULE_VERSION	"3.105"
+#define DRV_MODULE_RELDATE	"December 2, 2009"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -137,6 +137,12 @@
 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
+#define TG3_RX_STD_BUFF_RING_SIZE \
+	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+
+#define TG3_RX_JMB_BUFF_RING_SIZE \
+	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
 
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
 				       TG3_64BIT_REG_LOW, val);
 		return;
 	}
-	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
+	if (off == TG3_RX_STD_PROD_IDX_REG) {
 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
 				       TG3_64BIT_REG_LOW, val);
 		return;
@@ -937,9 +946,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 	u32 val;
 	struct phy_device *phydev;
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case TG3_PHY_ID_BCM50610:
+	case TG3_PHY_ID_BCM50610M:
 		val = MAC_PHYCFG2_50610_LED_MODES;
 		break;
 	case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1041,7 @@ static void tg3_mdio_start(struct tg3 *tp)
 		if (is_serdes)
 			tp->phy_addr += 7;
 	} else
-		tp->phy_addr = PHY_ADDR;
+		tp->phy_addr = TG3_PHY_MII_ADDR;
 
 	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1072,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	tp->mdio_bus->read     = &tg3_mdio_read;
 	tp->mdio_bus->write    = &tg3_mdio_write;
 	tp->mdio_bus->reset    = &tg3_mdio_reset;
-	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
+	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
 	for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1094,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 		return i;
 	}
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	if (!phydev || !phydev->drv) {
 		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1106,14 @@ static int tg3_mdio_init(struct tg3 *tp)
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case TG3_PHY_ID_BCM57780:
 		phydev->interface = PHY_INTERFACE_MODE_GMII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		break;
 	case TG3_PHY_ID_BCM50610:
+	case TG3_PHY_ID_BCM50610M:
+		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+				     PHY_BRCM_RX_REFCLK_UNUSED |
+				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
+				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1127,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	case TG3_PHY_ID_RTL8201E:
 	case TG3_PHY_ID_BCMAC131:
 		phydev->interface = PHY_INTERFACE_MODE_MII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
 		break;
 	}
@@ -1311,7 +1328,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 	u32 old_tx_mode = tp->tx_mode;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
-		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
+		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;
 
@@ -1348,7 +1365,7 @@ static void tg3_adjust_link(struct net_device *dev)
 	u8 oldflowctrl, linkmesg = 0;
 	u32 mac_mode, lcl_adv, rmt_adv;
 	struct tg3 *tp = netdev_priv(dev);
-	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	spin_lock_bh(&tp->lock);
 
@@ -1363,8 +1380,11 @@ static void tg3_adjust_link(struct net_device *dev)
 
 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
 			mac_mode |= MAC_MODE_PORT_MODE_MII;
-		else
+		else if (phydev->speed == SPEED_1000 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
 
 		if (phydev->duplex == DUPLEX_HALF)
 			mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1454,7 @@ static int tg3_phy_init(struct tg3 *tp)
 	/* Bring the PHY back to a known state. */
 	tg3_bmcr_reset(tp);
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	/* Attach the MAC to the PHY. */
 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1481,7 @@ static int tg3_phy_init(struct tg3 *tp)
 				      SUPPORTED_Asym_Pause);
 		break;
 	default:
-		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 		return -EINVAL;
 	}
 
@@ -1479,7 +1499,7 @@ static void tg3_phy_start(struct tg3 *tp)
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	if (tp->link_config.phy_is_low_power) {
 		tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1519,13 @@ static void tg3_phy_stop(struct tg3 *tp)
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
+	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
-		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
 	}
 }
@@ -2149,6 +2169,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 		udelay(40);
 		return;
+	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		u32 phytest;
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+			u32 phy;
+
+			tg3_writephy(tp, MII_ADVERTISE, 0);
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     phytest | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+				tg3_writephy(tp,
+					     MII_TG3_FET_SHDW_AUXMODE4,
+					     phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+		}
+		return;
 	} else if (do_low_power) {
 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2218,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
 static void tg3_enable_nvram_access(struct tg3 *tp)
 {
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
 		u32 nvaccess = tr32(NVRAM_ACCESS);
 
 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2229,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
 static void tg3_disable_nvram_access(struct tg3 *tp)
 {
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
 		u32 nvaccess = tr32(NVRAM_ACCESS);
 
 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2474,7 +2514,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
 			struct phy_device *phydev;
 			u32 phyid, advertising;
 
-			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 			tp->link_config.phy_is_low_power = 1;
 
@@ -3243,15 +3283,6 @@ relink:
 			pci_write_config_word(tp->pdev,
 					      tp->pcie_cap + PCI_EXP_LNKCTL,
 					      newlnkctl);
-	} else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-		u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
-		if (tp->link_config.active_speed == SPEED_100 ||
-		    tp->link_config.active_speed == SPEED_10)
-			newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-		else
-			newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-		if (newreg != oldreg)
-			tw32(TG3_PCIE_LNKCTL, newreg);
 	}
 
 	if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4320,13 +4351,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	struct netdev_queue *txq;
 	int index = tnapi - tp->napi;
 
-	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 		index--;
 
 	txq = netdev_get_tx_queue(tp->dev, index);
 
 	while (sw_idx != hw_idx) {
-		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
 		int i, tx_bug = 0;
 
@@ -4335,7 +4366,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			return;
 		}
 
-		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(tp->pdev,
+				 pci_unmap_addr(ri, mapping),
+				 skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
 
 		ri->skb = NULL;
 
@@ -4345,6 +4379,11 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			ri = &tnapi->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
+
+			pci_unmap_page(tp->pdev,
+				       pci_unmap_addr(ri, mapping),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
@@ -4375,6 +4414,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	}
 }
 
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+	if (!ri->skb)
+		return;
+
+	pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+			 map_sz, PCI_DMA_FROMDEVICE);
+	dev_kfree_skb_any(ri->skb);
+	ri->skb = NULL;
+}
+
 /* Returns size of skb allocated or < 0 on error.
  *
  * We only need to fill in the address because the other members
@@ -4386,16 +4436,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
-			    int src_idx, u32 dest_idx_unmasked)
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+			    u32 opaque_key, u32 dest_idx_unmasked)
 {
-	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map, *src_map;
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int skb_size, dest_idx;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	src_map = NULL;
 	switch (opaque_key) {
@@ -4403,8 +4451,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
-		if (src_idx >= 0)
-			src_map = &tpr->rx_std_buffers[src_idx];
 		skb_size = tp->rx_pkt_map_sz;
 		break;
 
@@ -4412,8 +4458,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
-		if (src_idx >= 0)
-			src_map = &tpr->rx_jmb_buffers[src_idx];
 		skb_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
@@ -4435,13 +4479,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 
 	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
 				 PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	map->skb = skb;
 	pci_unmap_addr_set(map, mapping, mapping);
 
-	if (src_map != NULL)
-		src_map->skb = NULL;
-
 	desc->addr_hi = ((u64)mapping >> 32);
 	desc->addr_lo = ((u64)mapping & 0xffffffff);
 
@@ -4452,30 +4497,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
-			   int src_idx, u32 dest_idx_unmasked)
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+			   struct tg3_rx_prodring_set *dpr,
+			   u32 opaque_key, int src_idx,
+			   u32 dest_idx_unmasked)
 {
 	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 	struct ring_info *src_map, *dest_map;
 	int dest_idx;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+	struct tg3_rx_prodring_set *spr = &tp->prodring[0];
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		dest_desc = &tpr->rx_std[dest_idx];
-		dest_map = &tpr->rx_std_buffers[dest_idx];
-		src_desc = &tpr->rx_std[src_idx];
-		src_map = &tpr->rx_std_buffers[src_idx];
+		dest_desc = &dpr->rx_std[dest_idx];
+		dest_map = &dpr->rx_std_buffers[dest_idx];
+		src_desc = &spr->rx_std[src_idx];
+		src_map = &spr->rx_std_buffers[src_idx];
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		dest_desc = &tpr->rx_jmb[dest_idx].std;
-		dest_map = &tpr->rx_jmb_buffers[dest_idx];
-		src_desc = &tpr->rx_jmb[src_idx].std;
-		src_map = &tpr->rx_jmb_buffers[src_idx];
+		dest_desc = &dpr->rx_jmb[dest_idx].std;
+		dest_map = &dpr->rx_jmb_buffers[dest_idx];
+		src_desc = &spr->rx_jmb[src_idx].std;
+		src_map = &spr->rx_jmb_buffers[src_idx];
 		break;
 
 	default:
@@ -4487,7 +4534,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
 			   pci_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
 	dest_desc->addr_lo = src_desc->addr_lo;
-
 	src_map->skb = NULL;
 }
 
@@ -4519,10 +4565,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 {
 	struct tg3 *tp = tnapi->tp;
 	u32 work_mask, rx_std_posted = 0;
+	u32 std_prod_idx, jmb_prod_idx;
 	u32 sw_idx = tnapi->rx_rcb_ptr;
 	u16 hw_idx;
 	int received;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+	struct tg3_rx_prodring_set *tpr = tnapi->prodring;
 
 	hw_idx = *(tnapi->rx_rcb_prod_idx);
 	/*
@@ -4532,7 +4579,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 	rmb();
 	work_mask = 0;
 	received = 0;
+	std_prod_idx = tpr->rx_std_prod_idx;
+	jmb_prod_idx = tpr->rx_jmb_prod_idx;
 	while (sw_idx != hw_idx && budget > 0) {
+		struct ring_info *ri;
 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
 		unsigned int len;
 		struct sk_buff *skb;
@@ -4542,16 +4592,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
 			dma_addr = pci_unmap_addr(ri, mapping);
 			skb = ri->skb;
-			post_ptr = &tpr->rx_std_ptr;
+			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
 			dma_addr = pci_unmap_addr(ri, mapping);
 			skb = ri->skb;
-			post_ptr = &tpr->rx_jmb_ptr;
+			post_ptr = &jmb_prod_idx;
 		} else
 			goto next_pkt_nopost;
 
@@ -4560,7 +4610,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 		drop_it:
-			tg3_recycle_rx(tnapi, opaque_key,
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 		drop_it_no_recycle:
 			/* Other statistics kept track of by card. */
@@ -4571,20 +4621,21 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 		      ETH_FCS_LEN;
 
-		if (len > RX_COPY_THRESHOLD
-			&& tp->rx_offset == NET_IP_ALIGN
-			/* rx_offset will likely not equal NET_IP_ALIGN
-			 * if this is a 5701 card running in PCI-X mode
-			 * [see tg3_get_invariants()]
-			 */
-		) {
+		if (len > RX_COPY_THRESHOLD &&
+		    tp->rx_offset == NET_IP_ALIGN) {
+		    /* rx_offset will likely not equal NET_IP_ALIGN
+		     * if this is a 5701 card running in PCI-X mode
+		     * [see tg3_get_invariants()]
+		     */
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
-						    desc_idx, *post_ptr);
+			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+						    *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
 
+			ri->skb = NULL;
+
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
@@ -4592,7 +4643,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		} else {
 			struct sk_buff *copy_skb;
 
-			tg3_recycle_rx(tnapi, opaque_key,
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
 			copy_skb = netdev_alloc_skb(tp->dev,
@@ -4643,9 +4694,7 @@ next_pkt:
 
 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
 			u32 idx = *post_ptr % TG3_RX_RING_SIZE;
-
-			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
-				     TG3_64BIT_REG_LOW, idx);
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
 			work_mask &= ~RXD_OPAQUE_RING_STD;
 			rx_std_posted = 0;
 		}
@@ -4665,33 +4714,45 @@ next_pkt_nopost:
 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
 	/* Refill RX ring(s). */
-	if (work_mask & RXD_OPAQUE_RING_STD) {
-		sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
-		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-			     sw_idx);
-	}
-	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-		sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
-		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-			     sw_idx);
+	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+		if (work_mask & RXD_OPAQUE_RING_STD) {
+			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+		}
+		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+			tpr->rx_jmb_prod_idx = jmb_prod_idx %
+					       TG3_RX_JUMBO_RING_SIZE;
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     tpr->rx_jmb_prod_idx);
+		}
+		mmiowb();
+	} else if (work_mask) {
+		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+		 * updated before the producer indices can be updated.
+		 */
+		smp_wmb();
+
+		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+
+		napi_schedule(&tp->napi[1].napi);
 	}
-	mmiowb();
 
 	return received;
 }
 
-static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+static void tg3_poll_link(struct tg3 *tp)
 {
-	struct tg3 *tp = tnapi->tp;
-	struct tg3_hw_status *sblk = tnapi->hw_status;
-
 	/* handle link change and other phy events */
 	if (!(tp->tg3_flags &
 	      (TG3_FLAG_USE_LINKCHG_REG |
 	       TG3_FLAG_POLL_SERDES))) {
+		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
 		if (sblk->status & SD_STATUS_LINK_CHG) {
 			sblk->status = SD_STATUS_UPDATED |
-				(sblk->status & ~SD_STATUS_LINK_CHG);
+				       (sblk->status & ~SD_STATUS_LINK_CHG);
 			spin_lock(&tp->lock);
 			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 				tw32_f(MAC_STATUS,
@@ -4705,6 +4766,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 			spin_unlock(&tp->lock);
 		}
 	}
+}
+
+static void tg3_rx_prodring_xfer(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *dpr,
+				 struct tg3_rx_prodring_set *spr)
+{
+	u32 si, di, cpycnt, src_prod_idx;
+	int i;
+
+	while (1) {
+		src_prod_idx = spr->rx_std_prod_idx;
+
+		/* Make sure updates to the rx_std_buffers[] entries and the
+		 * standard producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_std_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_std_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+		else
+			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+
+		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+
+		si = spr->rx_std_cons_idx;
+		di = dpr->rx_std_prod_idx;
+
+		memcpy(&dpr->rx_std_buffers[di],
+		       &spr->rx_std_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_std[si];
+			dbd = &dpr->rx_std[di];
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
+				       TG3_RX_RING_SIZE;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
+				       TG3_RX_RING_SIZE;
+	}
+
+	while (1) {
+		src_prod_idx = spr->rx_jmb_prod_idx;
+
+		/* Make sure updates to the rx_jmb_buffers[] entries and
+		 * the jumbo producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_jmb_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_jmb_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+		else
+			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+
+		si = spr->rx_jmb_cons_idx;
+		di = dpr->rx_jmb_prod_idx;
+
+		memcpy(&dpr->rx_jmb_buffers[di],
+		       &spr->rx_jmb_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_jmb[si].std;
+			dbd = &dpr->rx_jmb[di].std;
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
+				       TG3_RX_JUMBO_RING_SIZE;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
+				       TG3_RX_JUMBO_RING_SIZE;
+	}
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+	struct tg3 *tp = tnapi->tp;
 
 	/* run TX completion thread */
 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4720,6 +4873,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 		work_done += tg3_rx(tnapi, budget - work_done);
 
+	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+		int i;
+		u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
+		u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+
+		for (i = 2; i < tp->irq_cnt; i++)
+			tg3_rx_prodring_xfer(tp, tnapi->prodring,
+					     tp->napi[i].prodring);
+
+		wmb();
+
+		if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
+			u32 mbox = TG3_RX_STD_PROD_IDX_REG;
+			tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
+		}
+
+		if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
+			u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
+			tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
+		}
+
+		mmiowb();
+	}
+
+	return work_done;
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
+	int work_done = 0;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	while (1) {
+		work_done = tg3_poll_work(tnapi, work_done, budget);
+
+		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+			goto tx_recovery;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		/* tp->last_tag is used in tg3_restart_ints() below
+		 * to tell the hw how much work has been processed,
+		 * so we must read it before checking for more work.
+		 */
+		tnapi->last_tag = sblk->status_tag;
+		tnapi->last_irq_tag = tnapi->last_tag;
+		rmb();
+
+		/* check for RX/TX work to do */
+		if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+			napi_complete(napi);
+			/* Reenable interrupts. */
+			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+			mmiowb();
+			break;
+		}
+	}
+
+	return work_done;
+
+tx_recovery:
+	/* work_done is guaranteed to be less than budget. */
+	napi_complete(napi);
+	schedule_work(&tp->reset_task);
 	return work_done;
 }
 
@@ -4731,6 +4952,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 	struct tg3_hw_status *sblk = tnapi->hw_status;
 
 	while (1) {
+		tg3_poll_link(tp);
+
 		work_done = tg3_poll_work(tnapi, work_done, budget);
 
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5093,11 +5316,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
-static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-				       u32 last_plus_one, u32 *start,
-				       u32 base_flags, u32 mss)
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+				       struct sk_buff *skb, u32 last_plus_one,
+				       u32 *start, u32 base_flags, u32 mss)
 {
-	struct tg3_napi *tnapi = &tp->napi[0];
+	struct tg3 *tp = tnapi->tp;
 	struct sk_buff *new_skb;
 	dma_addr_t new_addr = 0;
 	u32 entry = *start;
@@ -5118,16 +5341,21 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	} else {
 		/* New SKB is guaranteed to be linear. */
 		entry = *start;
-		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
-		new_addr = skb_shinfo(new_skb)->dma_head;
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure the mapping succeeded */
+		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+			ret = -1;
+			dev_kfree_skb(new_skb);
+			new_skb = NULL;
 
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
-			if (!ret)
-				skb_dma_unmap(&tp->pdev->dev, new_skb,
-					      DMA_TO_DEVICE);
+		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			    tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
+					 PCI_DMA_TODEVICE);
 			ret = -1;
 			dev_kfree_skb(new_skb);
 			new_skb = NULL;
@@ -5141,15 +5369,28 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
+		int len;
+
 		if (i == 0)
-			tnapi->tx_buffers[entry].skb = new_skb;
+			len = skb_headlen(skb);
 		else
+			len = skb_shinfo(skb)->frags[i-1].size;
+
+		pci_unmap_single(tp->pdev,
+				 pci_unmap_addr(&tnapi->tx_buffers[entry],
+						mapping),
+				 len, PCI_DMA_TODEVICE);
+		if (i == 0) {
+			tnapi->tx_buffers[entry].skb = new_skb;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   new_addr);
+		} else {
 			tnapi->tx_buffers[entry].skb = NULL;
+		}
 		entry = NEXT_TX(entry);
 		i++;
 	}
 
-	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	return ret;
@@ -5179,21 +5420,22 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
 }
 
 /* hard_start_xmit for devices that don't have any bugs and
- * support TG3_FLG2_HW_TSO_2 only.
+ * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
  */
 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
-	struct skb_shared_info *sp;
 	dma_addr_t mapping;
 	struct tg3_napi *tnapi;
 	struct netdev_queue *txq;
+	unsigned int i, last;
+
 
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
-	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 		tnapi++;
 
 	/* We are running in BH disabled context with netif_tx_lock
@@ -5238,7 +5480,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 			hdrlen = ip_tcp_len + tcp_opt_len;
 		}
 
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
 			mss |= (hdrlen & 0xc) << 12;
 			if (hdrlen & 0x10)
 				base_flags |= 0x00000010;
@@ -5260,20 +5502,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 			       (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	len = skb_headlen(skb);
+
+	/* Queue skb data, a.k.a. the main skb fragment. */
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
 	}
 
-	sp = skb_shinfo(skb);
-
-	mapping = sp->dma_head;
-
 	tnapi->tx_buffers[entry].skb = skb;
+	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
-	len = skb_headlen(skb);
-
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
 	    !mss && skb->len > ETH_DATA_LEN)
 		base_flags |= TXD_FLAG_JMB_PKT;
 
@@ -5284,15 +5525,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
-		unsigned int i, last;
-
 		last = skb_shinfo(skb)->nr_frags - 1;
 		for (i = 0; i <= last; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = sp->dma_maps[i];
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
+
 			tnapi->tx_buffers[entry].skb = NULL;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
 
 			tg3_set_txd(tnapi, entry, mapping, len,
 				    base_flags, (i == last) | (mss << 1));
@@ -5315,6 +5562,27 @@ out_unlock:
 	mmiowb();
 
 	return NETDEV_TX_OK;
+
+dma_error:
+	last = i;
+	entry = tnapi->tx_prod;
+	tnapi->tx_buffers[entry].skb = NULL;
+	pci_unmap_single(tp->pdev,
+			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+	for (i = 0; i <= last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		entry = NEXT_TX(entry);
+
+		pci_unmap_page(tp->pdev,
+			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+					      mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
@@ -5362,12 +5630,17 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
-	struct skb_shared_info *sp;
 	int would_hit_hwbug;
 	dma_addr_t mapping;
-	struct tg3_napi *tnapi = &tp->napi[0];
+	struct tg3_napi *tnapi;
+	struct netdev_queue *txq;
+	unsigned int i, last;
 
-	len = skb_headlen(skb);
+
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+		tnapi++;
 
 	/* We are running in BH disabled context with netif_tx_lock
 	 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5375,8 +5648,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
 	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
 
 			/* This is a hard error, log it. */
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5389,10 +5662,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	base_flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
-	mss = 0;
+
 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
 		struct iphdr *iph;
-		int tcp_opt_len, ip_tcp_len, hdr_len;
+		u32 tcp_opt_len, ip_tcp_len, hdr_len;
 
 		if (skb_header_cloned(skb) &&
 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5696,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 								 IPPROTO_TCP,
 								 0);
 
-		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
-		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
+			mss |= (hdr_len & 0xc) << 12;
+			if (hdr_len & 0x10)
+				base_flags |= 0x00000010;
+			base_flags |= (hdr_len & 0x3e0) << 5;
+		} else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+			mss |= hdr_len << 9;
+		else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 			if (tcp_opt_len || iph->ihl > 5) {
 				int tsflags;
 
@@ -5446,22 +5726,35 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 			       (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
+	    !mss && skb->len > ETH_DATA_LEN)
+		base_flags |= TXD_FLAG_JMB_PKT;
+
+	len = skb_headlen(skb);
+
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
 	}
 
-	sp = skb_shinfo(skb);
-
-	mapping = sp->dma_head;
-
 	tnapi->tx_buffers[entry].skb = skb;
+	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	would_hit_hwbug = 0;
 
-	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
+	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
 		would_hit_hwbug = 1;
-	else if (tg3_4g_overflow_test(mapping, len))
+
+	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+	    tg3_4g_overflow_test(mapping, len))
+		would_hit_hwbug = 1;
+
+	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+	    tg3_40bit_overflow_test(tp, mapping, len))
+		would_hit_hwbug = 1;
+
+	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
 		would_hit_hwbug = 1;
 
 	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5471,21 +5764,32 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
-		unsigned int i, last;
-
 		last = skb_shinfo(skb)->nr_frags - 1;
 		for (i = 0; i <= last; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = sp->dma_maps[i];
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
 
 			tnapi->tx_buffers[entry].skb = NULL;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
 
-			if (tg3_4g_overflow_test(mapping, len))
+			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
+			    len <= 8)
 				would_hit_hwbug = 1;
 
-			if (tg3_40bit_overflow_test(tp, mapping, len))
+			if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			    tg3_4g_overflow_test(mapping, len))
+				would_hit_hwbug = 1;
+
+			if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+			    tg3_40bit_overflow_test(tp, mapping, len))
 				would_hit_hwbug = 1;
 
 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -5509,7 +5813,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
+		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
 						&start, base_flags, mss))
 			goto out_unlock;
 
@@ -5517,19 +5821,40 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	}
 
 	/* Packets are ready, update Tx producer idx local and on card. */
-	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
+	tw32_tx_mbox(tnapi->prodmbox, entry);
 
 	tnapi->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-			netif_wake_queue(tp->dev);
+			netif_tx_wake_queue(txq);
 	}
 
 out_unlock:
 	mmiowb();
 
 	return NETDEV_TX_OK;
+
+dma_error:
+	last = i;
+	entry = tnapi->tx_prod;
+	tnapi->tx_buffers[entry].skb = NULL;
+	pci_unmap_single(tp->pdev,
+			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+	for (i = 0; i <= last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		entry = NEXT_TX(entry);
+
+		pci_unmap_page(tp->pdev,
+			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+					      mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
@@ -5594,36 +5919,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 				 struct tg3_rx_prodring_set *tpr)
 {
 	int i;
-	struct ring_info *rxp;
-
-	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
-		rxp = &tpr->rx_std_buffers[i];
 
-		if (rxp->skb == NULL)
-			continue;
+	if (tpr != &tp->prodring[0]) {
+		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+		     i = (i + 1) % TG3_RX_RING_SIZE)
+			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+					tp->rx_pkt_map_sz);
+
+		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+			for (i = tpr->rx_jmb_cons_idx;
+			     i != tpr->rx_jmb_prod_idx;
+			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+						TG3_RX_JMB_MAP_SZ);
+			}
+		}
 
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(rxp, mapping),
-				 tp->rx_pkt_map_sz,
-				 PCI_DMA_FROMDEVICE);
-		dev_kfree_skb_any(rxp->skb);
-		rxp->skb = NULL;
+		return;
 	}
 
-	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-			rxp = &tpr->rx_jmb_buffers[i];
-
-			if (rxp->skb == NULL)
-				continue;
+	for (i = 0; i < TG3_RX_RING_SIZE; i++)
+		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+				tp->rx_pkt_map_sz);
 
-			pci_unmap_single(tp->pdev,
-					 pci_unmap_addr(rxp, mapping),
-					 TG3_RX_JMB_MAP_SZ,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_any(rxp->skb);
-			rxp->skb = NULL;
-		}
+	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+					TG3_RX_JMB_MAP_SZ);
 	}
 }
 
@@ -5638,7 +5960,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 				 struct tg3_rx_prodring_set *tpr)
 {
 	u32 i, rx_pkt_dma_sz;
-	struct tg3_napi *tnapi = &tp->napi[0];
+
+	tpr->rx_std_cons_idx = 0;
+	tpr->rx_std_prod_idx = 0;
+	tpr->rx_jmb_cons_idx = 0;
+	tpr->rx_jmb_prod_idx = 0;
+
+	if (tpr != &tp->prodring[0]) {
+		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
+			memset(&tpr->rx_jmb_buffers[0], 0,
+			       TG3_RX_JMB_BUFF_RING_SIZE);
+		goto done;
+	}
 
 	/* Zero out all descriptors. */
 	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5665,7 +5999,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX standard ring, "
 			       "only %d out of %d buffers were allocated "
@@ -5696,8 +6030,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 		}
 
 		for (i = 0; i < tp->rx_jumbo_pending; i++) {
-			if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
-					     -1, i) < 0) {
+			if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
+					     i) < 0) {
 				printk(KERN_WARNING PFX
 				       "%s: Using a smaller RX jumbo ring, "
 				       "only %d out of %d buffers were "
@@ -5741,8 +6075,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
 				struct tg3_rx_prodring_set *tpr)
 {
-	tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
-				      TG3_RX_RING_SIZE, GFP_KERNEL);
+	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
 	if (!tpr->rx_std_buffers)
 		return -ENOMEM;
 
@@ -5752,8 +6085,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
 		goto err_out;
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
-					      TG3_RX_JUMBO_RING_SIZE,
+		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
 					      GFP_KERNEL);
 		if (!tpr->rx_jmb_buffers)
 			goto err_out;
@@ -5790,8 +6122,9 @@ static void tg3_free_rings(struct tg3 *tp)
 			continue;
 
 		for (i = 0; i < TG3_TX_RING_SIZE; ) {
-			struct tx_ring_info *txp;
+			struct ring_info *txp;
 			struct sk_buff *skb;
+			unsigned int k;
 
 			txp = &tnapi->tx_buffers[i];
 			skb = txp->skb;
@@ -5801,17 +6134,29 @@ static void tg3_free_rings(struct tg3 *tp)
 				continue;
 			}
 
-			skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
-
+			pci_unmap_single(tp->pdev,
+					 pci_unmap_addr(txp, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 			txp->skb = NULL;
 
-			i += skb_shinfo(skb)->nr_frags + 1;
+			i++;
+
+			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
+				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
+				pci_unmap_page(tp->pdev,
+					       pci_unmap_addr(txp, mapping),
+					       skb_shinfo(skb)->frags[k].size,
+					       PCI_DMA_TODEVICE);
+				i++;
+			}
 
 			dev_kfree_skb_any(skb);
 		}
-	}
 
-	tg3_rx_prodring_free(tp, &tp->prodring[0]);
+		if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
+			tg3_rx_prodring_free(tp, &tp->prodring[j]);
+	}
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -5845,9 +6190,13 @@ static int tg3_init_rings(struct tg3 *tp)
 		tnapi->rx_rcb_ptr = 0;
 		if (tnapi->rx_rcb)
 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+		if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
+			tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+			return -ENOMEM;
 	}
 
-	return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
+	return 0;
 }
 
 /*
@@ -5891,7 +6240,8 @@ static void tg3_free_consistent(struct tg3 *tp)
 		tp->hw_stats = NULL;
 	}
 
-	tg3_rx_prodring_fini(tp, &tp->prodring[0]);
+	for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+		tg3_rx_prodring_fini(tp, &tp->prodring[i]);
 }
 
 /*
@@ -5902,8 +6252,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
 	int i;
 
-	if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
-		return -ENOMEM;
+	for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+		if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
+			goto err_out;
+	}
 
 	tp->hw_stats = pci_alloc_consistent(tp->pdev,
 					    sizeof(struct tg3_hw_stats),
@@ -5926,6 +6278,24 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 		sblk = tnapi->hw_status;
 
+		/* If multivector TSS is enabled, vector 0 does not handle
+		 * tx interrupts.  Don't allocate any resources for it.
+		 */
+		if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
+		    (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
+			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
+						    TG3_TX_RING_SIZE,
+						    GFP_KERNEL);
+			if (!tnapi->tx_buffers)
+				goto err_out;
+
+			tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
+							      TG3_TX_RING_BYTES,
+						       &tnapi->tx_desc_mapping);
+			if (!tnapi->tx_ring)
+				goto err_out;
+		}
+
 		/*
 		 * When RSS is enabled, the status block format changes
 		 * slightly.  The "rx_jumbo_consumer", "reserved",
@@ -5947,6 +6317,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 			break;
 		}
 
+		if (tp->irq_cnt == 1)
+			tnapi->prodring = &tp->prodring[0];
+		else if (i)
+			tnapi->prodring = &tp->prodring[i - 1];
+
 		/*
 		 * If multivector RSS is enabled, vector 0 does not handle
 		 * rx or tx interrupts.  Don't allocate any resources for it.
@@ -5961,17 +6336,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 			goto err_out;
 
 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
-
-		tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
-					    TG3_TX_RING_SIZE, GFP_KERNEL);
-		if (!tnapi->tx_buffers)
-			goto err_out;
-
-		tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
-						      TG3_TX_RING_BYTES,
-						      &tnapi->tx_desc_mapping);
-		if (!tnapi->tx_ring)
-			goto err_out;
 	}
 
 	return 0;
@@ -6580,10 +6944,35 @@ static int tg3_chip_reset(struct tg3 *tp)
 
 	tg3_mdio_start(tp);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+		u8 phy_addr;
+
+		phy_addr = tp->phy_addr;
+		tp->phy_addr = TG3_PHY_PCIE_ADDR;
+
+		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+			     TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
+		val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
+		      TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
+		      TG3_PCIEPHY_TX0CTRL1_NB_EN;
+		tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
+		udelay(10);
+
+		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+			     TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
+		val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
+		      TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
+		tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
+		udelay(10);
+
+		tp->phy_addr = phy_addr;
+	}
+
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
 		val = tr32(0x7c00);
 
 		tw32(0x7c00, val | (1 << 25));
@@ -6935,19 +7324,21 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 {
 	int i;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
-
-		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
-		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
-		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
 	} else {
 		tw32(HOSTCC_TXCOL_TICKS, 0);
 		tw32(HOSTCC_TXMAX_FRAMES, 0);
 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	}
 
+	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	} else {
 		tw32(HOSTCC_RXCOL_TICKS, 0);
 		tw32(HOSTCC_RXMAX_FRAMES, 0);
 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
@@ -6970,25 +7361,31 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 
 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
 		tw32(reg, ec->rx_coalesce_usecs);
-		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
-		tw32(reg, ec->tx_coalesce_usecs);
 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
 		tw32(reg, ec->rx_max_coalesced_frames);
-		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
-		tw32(reg, ec->tx_max_coalesced_frames);
 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
 		tw32(reg, ec->rx_max_coalesced_frames_irq);
-		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
-		tw32(reg, ec->tx_max_coalesced_frames_irq);
+
+		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
+			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_coalesce_usecs);
+			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_max_coalesced_frames);
+			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_max_coalesced_frames_irq);
+		}
 	}
 
 	for (; i < tp->irq_max - 1; i++) {
 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
-		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
-		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
-		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+
+		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
+			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+		}
 	}
 }
 
@@ -7002,6 +7399,8 @@ static void tg3_rings_reset(struct tg3 *tp)
 	/* Disable all transmit rings but the first. */
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
 	else
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 
@@ -7016,7 +7415,8 @@ static void tg3_rings_reset(struct tg3 *tp)
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
 	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
 	else
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -7089,17 +7489,19 @@ static void tg3_rings_reset(struct tg3 *tp)
 		/* Clear status block in ram. */
 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 
-		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
-			       (TG3_TX_RING_SIZE <<
-				BDINFO_FLAGS_MAXLEN_SHIFT),
-			       NIC_SRAM_TX_BUFFER_DESC);
+		if (tnapi->tx_ring) {
+			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+				       (TG3_TX_RING_SIZE <<
+					BDINFO_FLAGS_MAXLEN_SHIFT),
+				       NIC_SRAM_TX_BUFFER_DESC);
+			txrcb += TG3_BDINFO_SIZE;
+		}
 
 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
 			       (TG3_RX_RCB_RING_SIZE(tp) <<
 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
 
 		stblk += 8;
-		txrcb += TG3_BDINFO_SIZE;
 		rxrcb += TG3_BDINFO_SIZE;
 	}
 }
@@ -7162,15 +7564,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
 
 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
-	}
 
-	if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-		val = tr32(TG3_PCIE_LNKCTL);
-		if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
-			val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-		else
-			val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-		tw32(TG3_PCIE_LNKCTL, val);
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 	}
 
 	/* This works around an issue with Athlon chipsets on
@@ -7217,9 +7613,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (err)
 		return err;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+		val = tr32(TG3PCI_DMA_RW_CTRL) &
+		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
 		/* This value is determined during the probe time DMA
 		 * engine test, tg3_test_dma.
 		 */
@@ -7342,8 +7742,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	     ((u64) tpr->rx_std_mapping >> 32));
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
-	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
-	     NIC_SRAM_RX_BUFFER_DESC);
+	if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+		     NIC_SRAM_RX_BUFFER_DESC);
 
 	/* Disable the mini ring */
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7366,14 +7767,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 			     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
 			     BDINFO_FLAGS_USE_EXT_RECV);
-			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
-			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 		} else {
 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 			     BDINFO_FLAGS_DISABLED);
 		}
 
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
 			      (RX_STD_MAX_SIZE << 2);
 		else
@@ -7383,16 +7786,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 
-	tpr->rx_std_ptr = tp->rx_pending;
-	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-		     tpr->rx_std_ptr);
+	tpr->rx_std_prod_idx = tp->rx_pending;
+	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 
-	tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
+	tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
 			  tp->rx_jumbo_pending : 0;
-	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-		     tpr->rx_jmb_ptr);
+	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 		tw32(STD_REPLENISH_LWM, 32);
 		tw32(JMB_REPLENISH_LWM, 16);
 	}
@@ -7453,7 +7855,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 
@@ -7602,6 +8005,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 		val |= WDMAC_MODE_STATUS_TAG_FIX;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		val |= WDMAC_MODE_BURST_ALL_DATA;
+
 	tw32_f(WDMAC_MODE, val);
 	udelay(40);
 
@@ -7641,7 +8047,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
-	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
 	tw32(SNDBDI_MODE, val);
 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
@@ -8065,7 +8471,8 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	 * Turn off MSI one shot mode.  Otherwise this test has no
 	 * observable way to know whether the interrupt was delivered.
 	 */
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
 	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
 		tw32(MSGINT_MODE, val);
@@ -8108,7 +8515,8 @@ static int tg3_test_interrupt(struct tg3 *tp)
 
 	if (intr_ok) {
 		/* Reenable MSI one shot mode. */
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
 			tw32(MSGINT_MODE, val);
@@ -8249,7 +8657,11 @@ static bool tg3_enable_msix(struct tg3 *tp)
 	for (i = 0; i < tp->irq_max; i++)
 		tp->napi[i].irq_vec = msix_ent[i].vector;
 
-	tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+		tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+	} else
+		tp->dev->real_num_tx_queues = 1;
 
 	return true;
 }
@@ -8400,6 +8812,7 @@ static int tg3_open(struct net_device *dev)
 		}
 
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
 		    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
 			u32 val = tr32(PCIE_TRANSACTION_CFG);
@@ -9240,9 +9653,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct tg3 *tp = netdev_priv(dev);
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_gset(phydev, cmd);
 	}
 
 	cmd->supported = (SUPPORTED_Autoneg);
@@ -9281,9 +9696,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct tg3 *tp = netdev_priv(dev);
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_sset(phydev, cmd);
 	}
 
 	if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9436,15 +9853,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
 		return 0;
 	}
 	if ((dev->features & NETIF_F_IPV6_CSUM) &&
-	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
+	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
+	     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
 		if (value) {
 			dev->features |= NETIF_F_TSO6;
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+			if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 				dev->features |= NETIF_F_TSO_ECN;
 		} else
 			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -9466,7 +9884,7 @@ static int tg3_nway_reset(struct net_device *dev)
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
+		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 	} else {
 		u32 bmcr;
 
@@ -9585,7 +10003,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 			u32 newadv;
 			struct phy_device *phydev;
 
-			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 			if (epause->rx_pause) {
 				if (epause->tx_pause)
@@ -10339,6 +10757,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 		tx_data[i] = (u8) (i & 0xff);
 
 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, map)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 	       rnapi->coal_now);
@@ -10359,8 +10781,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	udelay(10);
 
-	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
-	for (i = 0; i < 25; i++) {
+	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
+	for (i = 0; i < 35; i++) {
 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 		       coal_now);
 
@@ -10565,9 +10987,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	int err;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_mii_ioctl(phydev, data, cmd);
 	}
 
 	switch(cmd) {
@@ -10887,7 +11311,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
 
 	/* NVRAM protection for TPM */
 	if (nvcfg1 & (1 << 27))
-		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 
 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -10928,7 +11352,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
 
 	/* NVRAM protection for TPM */
 	if (nvcfg1 & (1 << 27)) {
-		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 		protect = 1;
 	}
 
@@ -11022,7 +11446,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
 
 	/* NVRAM protection for TPM */
 	if (nvcfg1 & (1 << 27)) {
-		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 		protect = 1;
 	}
 
@@ -11283,7 +11707,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
 			tg3_get_5761_nvram_info(tp);
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 			tg3_get_5906_nvram_info(tp);
-		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 			tg3_get_57780_nvram_info(tp);
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
 			tg3_get_5717_nvram_info(tp);
@@ -11524,7 +11949,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 
 		tg3_enable_nvram_access(tp);
 		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
+		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
 			tw32(NVRAM_WRITE1, 0x406);
 
 		grc_mode = tr32(GRC_MODE);
@@ -12008,7 +12433,7 @@ skip_phy_reset:
 
 static void __devinit tg3_read_partno(struct tg3 *tp)
 {
-	unsigned char vpd_data[256];   /* in little-endian format */
+	unsigned char vpd_data[TG3_NVM_VPD_LEN];   /* in little-endian format */
 	unsigned int i;
 	u32 magic;
 
@@ -12017,48 +12442,37 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 		goto out_not_found;
 
 	if (magic == TG3_EEPROM_MAGIC) {
-		for (i = 0; i < 256; i += 4) {
+		for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
 			u32 tmp;
 
 			/* The data is in little-endian format in NVRAM.
 			 * Use the big-endian read routines to preserve
 			 * the byte order as it exists in NVRAM.
 			 */
-			if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
+			if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
 				goto out_not_found;
 
 			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
 		}
 	} else {
-		int vpd_cap;
-
-		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
-		for (i = 0; i < 256; i += 4) {
-			u32 tmp, j = 0;
-			__le32 v;
-			u16 tmp16;
-
-			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
-					      i);
-			while (j++ < 100) {
-				pci_read_config_word(tp->pdev, vpd_cap +
-						     PCI_VPD_ADDR, &tmp16);
-				if (tmp16 & 0x8000)
-					break;
-				msleep(1);
-			}
-			if (!(tmp16 & 0x8000))
+		ssize_t cnt;
+		unsigned int pos = 0, i = 0;
+
+		for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
+			cnt = pci_read_vpd(tp->pdev, pos,
+					   TG3_NVM_VPD_LEN - pos,
+					   &vpd_data[pos]);
+			if (cnt == -ETIMEDOUT || cnt == -EINTR)
+				cnt = 0;
+			else if (cnt < 0)
 				goto out_not_found;
-
-			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
-					      &tmp);
-			v = cpu_to_le32(tmp);
-			memcpy(&vpd_data[i], &v, sizeof(v));
 		}
+		if (pos != TG3_NVM_VPD_LEN)
+			goto out_not_found;
 	}
 
 	/* Now parse and find the part number. */
-	for (i = 0; i < 254; ) {
+	for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
 		unsigned char val = vpd_data[i];
 		unsigned int block_end;
 
@@ -12077,7 +12491,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 			      (vpd_data[i + 2] << 8)));
 		i += 3;
 
-		if (block_end > 256)
+		if (block_end > TG3_NVM_VPD_LEN)
 			goto out_not_found;
 
 		while (i < (block_end - 2)) {
@@ -12086,7 +12500,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 				int partno_len = vpd_data[i + 2];
 
 				i += 3;
-				if (partno_len > 24 || (partno_len + i) > 256)
+				if (partno_len > TG3_BPN_SIZE ||
+				    (partno_len + i) > TG3_NVM_VPD_LEN)
 					goto out_not_found;
 
 				memcpy(tp->board_part_number,
@@ -12117,6 +12532,8 @@ out_not_found:
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
 		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
 		strcpy(tp->board_part_number, "BCM57788");
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		strcpy(tp->board_part_number, "BCM57765");
 	else
 		strcpy(tp->board_part_number, "none");
 }
@@ -12400,13 +12817,21 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
 		u32 prod_id_asic_rev;
 
-		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
 			pci_read_config_dword(tp->pdev,
 					      TG3PCI_GEN2_PRODID_ASICREV,
 					      &prod_id_asic_rev);
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+			pci_read_config_dword(tp->pdev,
+					      TG3PCI_GEN15_PRODID_ASICREV,
+					      &prod_id_asic_rev);
 		else
 			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
 					      &prod_id_asic_rev);
@@ -12560,7 +12985,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -12586,6 +13012,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 			tp->dev->features |= NETIF_F_IPV6_CSUM;
 	}
 
+	/* Determine TSO capabilities */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
+	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
+	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
+		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+			tp->fw_needed = FIRMWARE_TG3TSO5;
+		else
+			tp->fw_needed = FIRMWARE_TG3TSO;
+	}
+
+	tp->irq_max = 1;
+
 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12597,29 +13047,31 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
 		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
-		} else {
-			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-				ASIC_REV_5750 &&
-	     		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
-				tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
 		}
-	}
 
-	tp->irq_max = 1;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
+			tp->irq_max = TG3_IRQ_MAX_VECS;
+		}
+	}
 
-#ifdef TG3_NAPI
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
-		tp->irq_max = TG3_IRQ_MAX_VECS;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
+	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
+		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
+		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
 	}
-#endif
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 	     (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
 		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
 
 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -12812,7 +13264,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
 
 	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -12891,7 +13344,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -12926,11 +13380,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
 
-	if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
-	     tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
-		tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
-
 	err = tg3_mdio_init(tp);
 	if (err)
 		return err;
@@ -13220,6 +13669,12 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 #endif
 #endif
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		goto out;
+	}
+
 	if (!goal)
 		goto out;
 
@@ -13414,7 +13869,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 {
 	dma_addr_t buf_dma;
 	u32 *buf, saved_dma_rwctrl;
-	int ret;
+	int ret = 0;
 
 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
 	if (!buf) {
@@ -13427,6 +13882,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 
 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		goto out;
+
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 		/* DMA read watermark not used on PCIE */
 		tp->dma_rwctrl |= 0x00180000;
@@ -13499,7 +13958,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	tg3_switch_clocks(tp);
 #endif
 
-	ret = 0;
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 		goto out;
@@ -13618,7 +14076,8 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
 		tp->bufmgr_config.mbuf_read_dma_low_water =
 			DEFAULT_MB_RDMA_LOW_WATER_5705;
 		tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -13678,6 +14137,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
 	case PHY_ID_BCM5756:	return "5722/5756";
 	case PHY_ID_BCM5906:	return "5906";
 	case PHY_ID_BCM5761:	return "5761";
+	case PHY_ID_BCM5717:	return "5717";
 	case PHY_ID_BCM8002:	return "8002/serdes";
 	case 0:			return "serdes";
 	default:		return "unknown";
@@ -13919,51 +14379,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
 
-	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
-	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
-	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
-	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
-		struct tg3_napi *tnapi = &tp->napi[i];
-
-		tnapi->tp = tp;
-		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
-
-		tnapi->int_mbox = intmbx;
-		if (i < 4)
-			intmbx += 0x8;
-		else
-			intmbx += 0x4;
-
-		tnapi->consmbox = rcvmbx;
-		tnapi->prodmbox = sndmbx;
-
-		if (i)
-			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
-		else
-			tnapi->coal_now = HOSTCC_MODE_NOW;
-
-		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
-			break;
-
-		/*
-		 * If we support MSIX, we'll be using RSS.  If we're using
-		 * RSS, the first vector only handles link interrupts and the
-		 * remaining vectors handle rx and tx interrupts.  Reuse the
-		 * mailbox values for the next iteration.  The values we setup
-		 * above are still useful for the single vectored mode.
-		 */
-		if (!i)
-			continue;
-
-		rcvmbx += 0x8;
-
-		if (sndmbx & 0x4)
-			sndmbx -= 0x4;
-		else
-			sndmbx += 0xc;
-	}
-
-	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
 	dev->ethtool_ops = &tg3_ethtool_ops;
 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
 	dev->irq = pdev->irq;
@@ -13975,8 +14390,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
-	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
 		dev->netdev_ops = &tg3_netdev_ops;
 	else
 		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14023,46 +14438,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
 	tg3_init_bufmgr_config(tp);
 
-	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
-		tp->fw_needed = FIRMWARE_TG3;
-
-	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+	/* Selectively allow TSO based on operating conditions */
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+	else {
+		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
+		tp->fw_needed = NULL;
 	}
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
-	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
-		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
-	} else {
-		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
-			tp->fw_needed = FIRMWARE_TG3TSO5;
-		else
-			tp->fw_needed = FIRMWARE_TG3TSO;
-	}
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+		tp->fw_needed = FIRMWARE_TG3;
 
 	/* TSO is on by default on chips that support hardware TSO.
 	 * Firmware TSO on older chips gives lower performance, so it
 	 * is off by default, but can be enabled using ethtool.
 	 */
-	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
-		if (dev->features & NETIF_F_IP_CSUM)
-			dev->features |= NETIF_F_TSO;
-		if ((dev->features & NETIF_F_IPV6_CSUM) &&
-		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
+	    (dev->features & NETIF_F_IP_CSUM))
+		dev->features |= NETIF_F_TSO;
+
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
+	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
+		if (dev->features & NETIF_F_IPV6_CSUM)
 			dev->features |= NETIF_F_TSO6;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 			dev->features |= NETIF_F_TSO_ECN;
 	}
 
-
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14074,7 +14482,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	if (err) {
 		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
 		       "aborting.\n");
-		goto err_out_fw;
+		goto err_out_iounmap;
 	}
 
 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
@@ -14083,7 +14491,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 			printk(KERN_ERR PFX "Cannot map APE registers, "
 			       "aborting.\n");
 			err = -ENOMEM;
-			goto err_out_fw;
+			goto err_out_iounmap;
 		}
 
 		tg3_ape_lock_init(tp);
@@ -14113,6 +14521,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
 
+	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->tp = tp;
+		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+		tnapi->int_mbox = intmbx;
+		if (i < 4)
+			intmbx += 0x8;
+		else
+			intmbx += 0x4;
+
+		tnapi->consmbox = rcvmbx;
+		tnapi->prodmbox = sndmbx;
+
+		if (i) {
+			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+			netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
+		} else {
+			tnapi->coal_now = HOSTCC_MODE_NOW;
+			netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
+		}
+
+		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
+			break;
+
+		/*
+		 * If we support MSIX, we'll be using RSS.  If we're using
+		 * RSS, the first vector only handles link interrupts and the
+		 * remaining vectors handle rx and tx interrupts.  Reuse the
+		 * mailbox values for the next iteration.  The values we setup
+		 * above are still useful for the single vectored mode.
+		 */
+		if (!i)
+			continue;
+
+		rcvmbx += 0x8;
+
+		if (sndmbx & 0x4)
+			sndmbx -= 0x4;
+		else
+			sndmbx += 0xc;
+	}
+
 	tg3_init_coal(tp);
 
 	pci_set_drvdata(pdev, dev);
@@ -14131,13 +14586,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	       tg3_bus_string(tp, str),
 	       dev->dev_addr);
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
+	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+		struct phy_device *phydev;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		printk(KERN_INFO
 		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
-		       tp->dev->name,
-		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
-		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
-	else
+		       tp->dev->name, phydev->drv->name,
+		       dev_name(&phydev->dev));
+	} else
 		printk(KERN_INFO
 		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
 		       tp->dev->name, tg3_phy_string(tp),
@@ -14166,10 +14622,6 @@ err_out_apeunmap:
 		tp->aperegs = NULL;
 	}
 
-err_out_fw:
-	if (tp->fw)
-		release_firmware(tp->fw);
-
 err_out_iounmap:
 	if (tp->regs) {
 		iounmap(tp->regs);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bab7940158e6..cd30889650f8 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -46,10 +46,15 @@
 #define  TG3PCI_DEVICE_TIGON3_57788	 0x1691
 #define  TG3PCI_DEVICE_TIGON3_5785_G	 0x1699 /* GPHY */
 #define  TG3PCI_DEVICE_TIGON3_5785_F	 0x16a0 /* 10/100 only */
-#define  TG3PCI_DEVICE_TIGON3_5717C	 0x1655
-#define  TG3PCI_DEVICE_TIGON3_5717S	 0x1656
-#define  TG3PCI_DEVICE_TIGON3_5718C	 0x1665
-#define  TG3PCI_DEVICE_TIGON3_5718S	 0x1666
+#define  TG3PCI_DEVICE_TIGON3_5717	 0x1655
+#define  TG3PCI_DEVICE_TIGON3_5718	 0x1656
+#define  TG3PCI_DEVICE_TIGON3_5724	 0x165c
+#define  TG3PCI_DEVICE_TIGON3_57781	 0x16b1
+#define  TG3PCI_DEVICE_TIGON3_57785	 0x16b5
+#define  TG3PCI_DEVICE_TIGON3_57761	 0x16b0
+#define  TG3PCI_DEVICE_TIGON3_57765	 0x16b4
+#define  TG3PCI_DEVICE_TIGON3_57791	 0x16b2
+#define  TG3PCI_DEVICE_TIGON3_57795	 0x16b6
 /* 0x04 --> 0x64 unused */
 #define TG3PCI_MSI_DATA			0x00000064
 /* 0x66 --> 0x68 unused */
@@ -103,6 +108,7 @@
 #define  CHIPREV_ID_5906_A1		 0xc001
 #define  CHIPREV_ID_57780_A0		 0x57780000
 #define  CHIPREV_ID_57780_A1		 0x57780001
+#define  CHIPREV_ID_5717_A0		 0x05717000
 #define  GET_ASIC_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 12)
 #define   ASIC_REV_5700			 0x07
 #define   ASIC_REV_5701			 0x00
@@ -122,6 +128,7 @@
 #define   ASIC_REV_5785			 0x5785
 #define   ASIC_REV_57780		 0x57780
 #define   ASIC_REV_5717			 0x5717
+#define   ASIC_REV_57765		 0x57785
 #define  GET_CHIP_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX		 0x70
 #define   CHIPREV_5700_BX		 0x71
@@ -141,8 +148,7 @@
 #define   METAL_REV_B1			 0x01
 #define   METAL_REV_B2			 0x02
 #define TG3PCI_DMA_RW_CTRL		0x0000006c
-#define  DMA_RWCTRL_MIN_DMA		 0x000000ff
-#define  DMA_RWCTRL_MIN_DMA_SHIFT	 0
+#define  DMA_RWCTRL_DIS_CACHE_ALIGNMENT  0x00000001
 #define  DMA_RWCTRL_READ_BNDRY_MASK	 0x00000700
 #define  DMA_RWCTRL_READ_BNDRY_DISAB	 0x00000000
 #define  DMA_RWCTRL_READ_BNDRY_16	 0x00000100
@@ -221,6 +227,7 @@
 /* 0xc0 --> 0xf4 unused */
 
 #define TG3PCI_GEN2_PRODID_ASICREV	0x000000f4
+#define TG3PCI_GEN15_PRODID_ASICREV	0x000000fc
 /* 0xf8 --> 0x200 unused */
 
 #define TG3_CORR_ERR_STAT		0x00000110
@@ -242,7 +249,11 @@
 #define MAILBOX_GENERAL_7		0x00000258 /* 64-bit */
 #define MAILBOX_RELOAD_STAT		0x00000260 /* 64-bit */
 #define MAILBOX_RCV_STD_PROD_IDX	0x00000268 /* 64-bit */
+#define TG3_RX_STD_PROD_IDX_REG		(MAILBOX_RCV_STD_PROD_IDX + \
+					 TG3_64BIT_REG_LOW)
 #define MAILBOX_RCV_JUMBO_PROD_IDX	0x00000270 /* 64-bit */
+#define TG3_RX_JMB_PROD_IDX_REG		(MAILBOX_RCV_JUMBO_PROD_IDX + \
+					 TG3_64BIT_REG_LOW)
 #define MAILBOX_RCV_MINI_PROD_IDX	0x00000278 /* 64-bit */
 #define MAILBOX_RCVRET_CON_IDX_0	0x00000280 /* 64-bit */
 #define MAILBOX_RCVRET_CON_IDX_1	0x00000288 /* 64-bit */
@@ -1264,8 +1275,9 @@
 #define  WDMAC_MODE_FIFOURUN_ENAB	 0x00000080
 #define  WDMAC_MODE_FIFOOREAD_ENAB	 0x00000100
 #define  WDMAC_MODE_LNGREAD_ENAB	 0x00000200
-#define  WDMAC_MODE_RX_ACCEL	 	 0x00000400
+#define  WDMAC_MODE_RX_ACCEL		 0x00000400
 #define  WDMAC_MODE_STATUS_TAG_FIX	 0x20000000
+#define  WDMAC_MODE_BURST_ALL_DATA	 0xc0000000
 #define WDMAC_STATUS			0x00004c04
 #define  WDMAC_STATUS_TGTABORT		 0x00000004
 #define  WDMAC_STATUS_MSTABORT		 0x00000008
@@ -1809,6 +1821,11 @@
 
 #define TG3_OTP_DEFAULT			0x286c1640
 
+
+/* Hardware Legacy NVRAM layout */
+#define TG3_NVM_VPD_OFF			0x100
+#define TG3_NVM_VPD_LEN			256
+
 /* Hardware Selfboot NVRAM layout */
 #define TG3_NVM_HWSB_CFG1		0x00000004
 #define  TG3_NVM_HWSB_CFG1_MAJMSK	0xf8000000
@@ -1953,10 +1970,34 @@
 #define  NIC_SRAM_MBUF_POOL_BASE5705	0x00010000
 #define  NIC_SRAM_MBUF_POOL_SIZE5705	0x0000e000
 
+
 /* Currently this is fixed. */
-#define PHY_ADDR		0x01
+#define TG3_PHY_PCIE_ADDR		0x00
+#define TG3_PHY_MII_ADDR		0x01
+
+
+/*** Tigon3 specific PHY PCIE registers. ***/
+
+#define TG3_PCIEPHY_BLOCK_ADDR		0x1f
+#define  TG3_PCIEPHY_XGXS_BLK1		0x0801
+#define  TG3_PCIEPHY_TXB_BLK		0x0861
+#define  TG3_PCIEPHY_BLOCK_SHIFT	4
+
+/* TG3_PCIEPHY_TXB_BLK */
+#define TG3_PCIEPHY_TX0CTRL1		0x15
+#define  TG3_PCIEPHY_TX0CTRL1_TXOCM	0x0003
+#define  TG3_PCIEPHY_TX0CTRL1_RDCTL	0x0008
+#define  TG3_PCIEPHY_TX0CTRL1_TXCMV	0x0030
+#define  TG3_PCIEPHY_TX0CTRL1_TKSEL	0x0040
+#define  TG3_PCIEPHY_TX0CTRL1_NB_EN	0x0400
 
-/* Tigon3 specific PHY MII registers. */
+/* TG3_PCIEPHY_XGXS_BLK1 */
+#define TG3_PCIEPHY_PWRMGMT4		0x1a
+#define TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN	0x0038
+#define TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN	0x4000
+
+
+/*** Tigon3 specific PHY MII registers. ***/
 #define  TG3_BMCR_SPEED1000		0x0040
 
 #define MII_TG3_CTRL			0x09 /* 1000-baseT control register */
@@ -2055,6 +2096,9 @@
 #define MII_TG3_FET_SHDW_MISCCTRL	0x10
 #define  MII_TG3_FET_SHDW_MISCCTRL_MDIX	0x4000
 
+#define MII_TG3_FET_SHDW_AUXMODE4	0x1a
+#define MII_TG3_FET_SHDW_AUXMODE4_SBPD	0x0008
+
 #define MII_TG3_FET_SHDW_AUXSTAT2	0x1b
 #define  MII_TG3_FET_SHDW_AUXSTAT2_APD	0x0020
 
@@ -2410,10 +2454,6 @@ struct ring_info {
 	DECLARE_PCI_UNMAP_ADDR(mapping)
 };
 
-struct tx_ring_info {
-	struct sk_buff			*skb;
-};
-
 struct tg3_config_info {
 	u32				flags;
 };
@@ -2542,8 +2582,10 @@ struct tg3_ethtool_stats {
 };
 
 struct tg3_rx_prodring_set {
-	u32				rx_std_ptr;
-	u32				rx_jmb_ptr;
+	u32				rx_std_prod_idx;
+	u32				rx_std_cons_idx;
+	u32				rx_jmb_prod_idx;
+	u32				rx_jmb_cons_idx;
 	struct tg3_rx_buffer_desc	*rx_std;
 	struct tg3_ext_rx_buffer_desc	*rx_jmb;
 	struct ring_info		*rx_std_buffers;
@@ -2571,10 +2613,11 @@ struct tg3_napi {
 	u32				consmbox;
 	u32				rx_rcb_ptr;
 	u16				*rx_rcb_prod_idx;
+	struct tg3_rx_prodring_set	*prodring;
 
 	struct tg3_rx_buffer_desc	*rx_rcb;
 	struct tg3_tx_buffer_desc	*tx_ring;
-	struct tx_ring_info		*tx_buffers;
+	struct ring_info		*tx_buffers;
 
 	dma_addr_t			status_mapping;
 	dma_addr_t			rx_rcb_mapping;
@@ -2654,7 +2697,7 @@ struct tg3 {
 	struct vlan_group		*vlgrp;
 #endif
 
-	struct tg3_rx_prodring_set	prodring[1];
+	struct tg3_rx_prodring_set	prodring[TG3_IRQ_MAX_VECS - 1];
 
 
 	/* begin "everything else" cacheline(s) section */
@@ -2725,7 +2768,7 @@ struct tg3 {
 #define TG3_FLG2_SERDES_PREEMPHASIS	0x00020000
 #define TG3_FLG2_5705_PLUS		0x00040000
 #define TG3_FLG2_5750_PLUS		0x00080000
-#define TG3_FLG2_PROTECTED_NVRAM	0x00100000
+#define TG3_FLG2_HW_TSO_3		0x00100000
 #define TG3_FLG2_USING_MSI		0x00200000
 #define TG3_FLG2_USING_MSIX		0x00400000
 #define TG3_FLG2_USING_MSI_OR_MSIX	(TG3_FLG2_USING_MSI | \
@@ -2737,7 +2780,9 @@ struct tg3 {
 #define TG3_FLG2_ICH_WORKAROUND		0x02000000
 #define TG3_FLG2_5780_CLASS		0x04000000
 #define TG3_FLG2_HW_TSO_2		0x08000000
-#define TG3_FLG2_HW_TSO			(TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
+#define TG3_FLG2_HW_TSO			(TG3_FLG2_HW_TSO_1 | \
+					 TG3_FLG2_HW_TSO_2 | \
+					 TG3_FLG2_HW_TSO_3)
 #define TG3_FLG2_1SHOT_MSI		0x10000000
 #define TG3_FLG2_PHY_JITTER_BUG		0x20000000
 #define TG3_FLG2_NO_FWARE_REPORTED	0x40000000
@@ -2745,6 +2790,7 @@ struct tg3 {
 	u32				tg3_flags3;
 #define TG3_FLG3_NO_NVRAM_ADDR_TRANS	0x00000001
 #define TG3_FLG3_ENABLE_APE		0x00000002
+#define TG3_FLG3_PROTECTED_NVRAM	0x00000004
 #define TG3_FLG3_5701_DMA_BUG		0x00000008
 #define TG3_FLG3_USE_PHYLIB		0x00000010
 #define TG3_FLG3_MDIOBUS_INITED		0x00000020
@@ -2756,9 +2802,13 @@ struct tg3 {
 #define TG3_FLG3_PHY_ENABLE_APD		0x00001000
 #define TG3_FLG3_5755_PLUS		0x00002000
 #define TG3_FLG3_NO_NVRAM		0x00004000
-#define TG3_FLG3_TOGGLE_10_100_L1PLLPD	0x00008000
 #define TG3_FLG3_PHY_IS_FET		0x00010000
 #define TG3_FLG3_ENABLE_RSS		0x00020000
+#define TG3_FLG3_ENABLE_TSS		0x00040000
+#define TG3_FLG3_4G_DMA_BNDRY_BUG	0x00080000
+#define TG3_FLG3_40BIT_DMA_LIMIT_BUG	0x00100000
+#define TG3_FLG3_SHORT_DMA_BUG		0x00200000
+#define TG3_FLG3_USE_JUMBO_BDFLAG	0x00400000
 
 	struct timer_list		timer;
 	u16				timer_counter;
@@ -2825,6 +2875,7 @@ struct tg3 {
 #define PHY_ID_BCM5756			0xbc050ed0
 #define PHY_ID_BCM5784			0xbc050fa0
 #define PHY_ID_BCM5761			0xbc050fd0
+#define PHY_ID_BCM5717			0x5c0d8a00
 #define PHY_ID_BCM5906			0xdc00ac40
 #define PHY_ID_BCM8002			0x60010140
 #define PHY_ID_INVALID			0xffffffff
@@ -2834,6 +2885,7 @@ struct tg3 {
 #define PHY_REV_BCM5401_C0		0x6
 #define PHY_REV_BCM5411_X0		0x1 /* Found on Netgear GA302T */
 #define TG3_PHY_ID_BCM50610		0x143bd60
+#define TG3_PHY_ID_BCM50610M		0x143bd70
 #define TG3_PHY_ID_BCMAC131		0x143bc70
 #define TG3_PHY_ID_RTL8211C		0x001cc910
 #define TG3_PHY_ID_RTL8201E		0x00008200
@@ -2846,8 +2898,9 @@ struct tg3 {
 	u32				led_ctrl;
 	u32				phy_otp;
 
-	char				board_part_number[24];
-#define TG3_VER_SIZE 32
+#define TG3_BPN_SIZE			24
+	char				board_part_number[TG3_BPN_SIZE];
+#define TG3_VER_SIZE			ETHTOOL_FWVERS_LEN
 	char				fw_ver[TG3_VER_SIZE];
 	u32				nic_sram_data_cfg;
 	u32				pci_clock_ctrl;
@@ -2865,7 +2918,7 @@ struct tg3 {
 	 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
 	 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
 	 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
-	 (X) == PHY_ID_BCM8002)
+	 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
 
 	struct tg3_hw_stats		*hw_stats;
 	dma_addr_t			stats_mapping;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3d31b47332bb..fabaeffb3155 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1549,7 +1549,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 		if (tmpCStat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
-		new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+		new_skb = netdev_alloc_skb_ip_align(dev,
+						    TLAN_MAX_FRAME_SIZE + 5);
 		if ( !new_skb )
 			goto drop_and_reuse;
 
@@ -1563,7 +1564,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 		skb->protocol = eth_type_trans( skb, dev );
 		netif_rx( skb );
 
-		skb_reserve( new_skb, NET_IP_ALIGN );
 		head_list->buffer[0].address = pci_map_single(priv->pciDev,
 							      new_skb->data,
 							      TLAN_MAX_FRAME_SIZE,
@@ -1755,8 +1755,8 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
 			     ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
                 		tlphy_ctl |= TLAN_TC_SWAPOL;
                 		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-        		} else if ( ( tlphy_sts & TLAN_TS_POLOK )
-				    && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+			} else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
+				    ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
                 		tlphy_ctl &= ~TLAN_TC_SWAPOL;
                 		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
         		}
@@ -1967,13 +1967,12 @@ static void TLan_ResetLists( struct net_device *dev )
 		list->cStat = TLAN_CSTAT_READY;
 		list->frameSize = TLAN_MAX_FRAME_SIZE;
 		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
-		skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
 		if ( !skb ) {
 			pr_err("TLAN: out of memory for received data.\n" );
 			break;
 		}
 
-		skb_reserve( skb, NET_IP_ALIGN );
 		list->buffer[0].address = pci_map_single(priv->pciDev,
 							 skb->data,
 							 TLAN_MAX_FRAME_SIZE,
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 724158966ec1..cf552d1d9629 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -610,9 +610,8 @@ static int xl_open(struct net_device *dev)
 
 	u16 switchsettings, switchsettings_eeprom  ;
  
-	if(request_irq(dev->irq, &xl_interrupt, IRQF_SHARED , "3c359", dev)) {
+	if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
 		return -EAGAIN;
-	}
 
 	/* 
 	 * Read the information from the EEPROM that we need.
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 75fa32e34fd0..5db0270957ac 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -680,7 +680,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 
 	/* The PCMCIA has already got the interrupt line and the io port, 
 	   so no chance of anybody else getting it - MLP */
-	if (request_irq(dev->irq = irq, &tok_interrupt, 0, "ibmtr", dev) != 0) {
+	if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) {
 		DPRINTK("Could not grab irq %d.  Halting Token Ring driver.\n",
 					irq);
 		iounmap(t_mmio);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 26dca2b2bdbd..d6ccd59c7d07 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -596,7 +596,7 @@ static int streamer_open(struct net_device *dev)
 	        rc=streamer_reset(dev);
 	}
 
-	if (request_irq(dev->irq, &streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
+	if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
 		return -EAGAIN;
 	}
 #if STREAMER_DEBUG
@@ -712,8 +712,8 @@ static int streamer_open(struct net_device *dev)
 					strcat(open_error, " - ");
 					strcat(open_error, open_min_error[(error_code & 0x0f)]);
 
-					if (!streamer_priv->streamer_ring_speed
-					    && ((error_code & 0x0f) == 0x0d)) 
+					if (!streamer_priv->streamer_ring_speed &&
+					    ((error_code & 0x0f) == 0x0d))
 					{
 						printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
 						printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
@@ -1032,8 +1032,8 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id)
 	sisr = readw(streamer_mmio + SISR);
 
 	while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE | 
-		       SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR))
-               && (max_intr > 0)) {
+		       SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) &&
+	      (max_intr > 0)) {
 
 		if(sisr & SISR_PAR_ERR) {
 			writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index d9ec7f0bbd0a..df32025c5132 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -445,9 +445,9 @@ static int olympic_open(struct net_device *dev)
 
 	olympic_init(dev);
 
-	if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
+	if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
+			dev))
 		return -EAGAIN;
-	}
 
 #if OLYMPIC_DEBUG
 	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index ebda61bc4c2f..427a8970b6fe 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -2309,9 +2309,9 @@ static irqreturn_t smctr_interrupt(int irq, void *dev_id)
                                 else
                                 {
                                         if((tp->acb_head->cmd
-                                                == ACB_CMD_READ_TRC_STATUS)
-                                                && (tp->acb_head->subcmd
-                                                == RW_TRC_STATUS_BLOCK))
+					    == ACB_CMD_READ_TRC_STATUS) &&
+					   (tp->acb_head->subcmd
+					    == RW_TRC_STATUS_BLOCK))
                                         {
                                                 if(tp->ptr_bcn_type)
                                                 {
@@ -2331,8 +2331,8 @@ static irqreturn_t smctr_interrupt(int irq, void *dev_id)
                                                         smctr_disable_16bit(dev);
                                                         err = smctr_ring_status_chg(dev);
                                                         smctr_enable_16bit(dev);
-                                                        if((tp->ring_status & REMOVE_RECEIVED)
-                                                                && (tp->config_word0 & NO_AUTOREMOVE))
+                                                        if((tp->ring_status & REMOVE_RECEIVED) &&
+							   (tp->config_word0 & NO_AUTOREMOVE))
                                                         {
                                                                 smctr_issue_remove_cmd(dev);
                                                         }
@@ -2511,9 +2511,9 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
         tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
         tp->config_word1 = 0;
 
-        if((tp->media_type == MEDIA_STP_16)
-                || (tp->media_type == MEDIA_UTP_16)
-                || (tp->media_type == MEDIA_STP_16_UTP_16))
+        if((tp->media_type == MEDIA_STP_16) ||
+	   (tp->media_type == MEDIA_UTP_16) ||
+	   (tp->media_type == MEDIA_STP_16_UTP_16))
         {
                 tp->config_word0 |= FREQ_16MB_BIT;
         }
@@ -2556,9 +2556,9 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
                         tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
         }
 
-        if((tp->media_type == MEDIA_STP_16)
-                || (tp->media_type == MEDIA_UTP_16)
-                || (tp->media_type == MEDIA_STP_16_UTP_16))
+        if((tp->media_type == MEDIA_STP_16) ||
+	   (tp->media_type == MEDIA_UTP_16) ||
+	   (tp->media_type == MEDIA_STP_16_UTP_16))
         {
                 tp->config_word1 |= INTERFRAME_SPACING_16;
         }
@@ -2568,9 +2568,9 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
         *pTimer_Struc++ = tp->config_word0;
         *pTimer_Struc++ = tp->config_word1;
 
-        if((tp->media_type == MEDIA_STP_4)
-                || (tp->media_type == MEDIA_UTP_4)
-                || (tp->media_type == MEDIA_STP_4_UTP_4))
+        if((tp->media_type == MEDIA_STP_4) ||
+	   (tp->media_type == MEDIA_UTP_4) ||
+	   (tp->media_type == MEDIA_STP_4_UTP_4))
         {
                 *pTimer_Struc++ = 0x00FA;       /* prescale */
                 *pTimer_Struc++ = 0x2710;       /* TPT_limit */
@@ -2990,8 +2990,8 @@ static int smctr_load_firmware(struct net_device *dev)
 	}
 
         /* Verify the firmware exists and is there in the right amount. */
-        if (!fw->data
-                || (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION))
+        if (!fw->data ||
+	    (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION))
         {
                 err = (UCODE_NOT_PRESENT);
 		goto out;
@@ -3010,9 +3010,8 @@ static int smctr_load_firmware(struct net_device *dev)
         smctr_enable_16bit(dev);
         smctr_set_page(dev, (__u8 *)tp->ram_access);
 
-        if((smctr_checksum_firmware(dev))
-                || (*(fw->data + UCODE_VERSION_OFFSET)
-                > tp->microcode_version))
+        if((smctr_checksum_firmware(dev)) ||
+	   (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version))
         {
                 smctr_enable_adapter_ctrl_store(dev);
 
@@ -3117,9 +3116,9 @@ static int smctr_lobe_media_test(struct net_device *dev)
         }
 
         /* Check if any frames received during test. */
-        if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status)
-                || (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
-			goto err;
+        if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) ||
+	   (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
+		goto err;
 
         /* Set receive mask to "Promisc" mode. */
         tp->receive_mask = saved_rcv_mask;
@@ -3303,8 +3302,8 @@ static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
         /* Set Group Address Sub-vector to all zeros if only the
          * Group Address/Functional Address Indicator is set.
          */
-        if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00
-        	&& tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
+        if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 &&
+	   tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
                 tsv->svv[0] = 0x00;
 
         return (0);
@@ -3876,10 +3875,10 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
         /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
          * ACCEPT_ATT_MAC_FRAMES is set.
          */
-        if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
-                && (xframe == (__u8)0))
-                || ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES)
-                && (xframe == (__u8)1)))
+        if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) &&
+	    (xframe == (__u8)0)) ||
+	   ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) &&
+	    (xframe == (__u8)1)))
         {
                 rmf->vl = SWAP_BYTES(rmf->vl);
 
@@ -3934,8 +3933,8 @@ static int smctr_ram_memory_test(struct net_device *dev)
 
                 word_pattern = start_pattern;
 
-                for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1
-                        && (~err); j += 2, word_pattern++)
+                for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err);
+		    j += 2, word_pattern++)
                 {
                         word_read = *(__u16 *)(pword + j);
                         if(word_read != word_pattern)
@@ -3959,8 +3958,7 @@ static int smctr_ram_memory_test(struct net_device *dev)
                 for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
                         *(__u16 *)(pword + j) = word_pattern;
 
-                for(j =0; j < (__u32)tp->ram_usable * 1024
-                        && (~err); j += 2)
+                for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2)
                 {
                         word_read = *(__u16 *)(pword + j);
                         if(word_read != word_pattern)
@@ -4325,8 +4323,8 @@ static int smctr_restart_tx_chain(struct net_device *dev, short queue)
         if(smctr_debug > 10)
                 printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name);
 
-        if(tp->num_tx_fcbs_used[queue] != 0
-                && tp->tx_queue_status[queue] == NOT_TRANSMITING)
+        if(tp->num_tx_fcbs_used[queue] != 0 &&
+	   tp->tx_queue_status[queue] == NOT_TRANSMITING)
         {
                 tp->tx_queue_status[queue] = TRANSMITING;
                 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
@@ -4349,8 +4347,8 @@ static int smctr_ring_status_chg(struct net_device *dev)
          */
         if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
         {
-                if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE)
-                        || (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
+                if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) ||
+		   (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
                 {
                         tp->monitor_state_ready = 1;
                 }
@@ -4363,8 +4361,8 @@ static int smctr_ring_status_chg(struct net_device *dev)
                         tp->monitor_state_ready = 0;
 
 			/* Ring speed problem, switching to auto mode. */
-			if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE
-				&& !tp->cleanup)
+			if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE &&
+			   !tp->cleanup)
 			{
 				printk(KERN_INFO "%s: Incorrect ring speed switching.\n",
 					dev->name);
@@ -4442,8 +4440,8 @@ static int smctr_rx_frame(struct net_device *dev)
         {
                 err = HARDWARE_FAILED;
 
-                if(((status & 0x007f) == 0)
-                        || ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
+                if(((status & 0x007f) == 0) ||
+		   ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
                 {
                         /* frame length less the CRC (4 bytes) + FS (1 byte) */
                         rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;
@@ -4538,8 +4536,8 @@ static int smctr_send_dat(struct net_device *dev)
         }
 
         /* Check if GOOD frame Tx'ed. */
-        if(!(fcb->frame_status &  FCB_COMMAND_DONE)
-                || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
+        if(!(fcb->frame_status &  FCB_COMMAND_DONE) ||
+	   fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
         {
                 return (INITIALIZE_FAILED);
         }
@@ -4653,8 +4651,8 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
         }
 
         /* Check if GOOD frame Tx'ed */
-        if(!(fcb->frame_status & FCB_COMMAND_DONE)
-                || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
+        if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
+	   fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
         {
                 return (LOBE_MEDIA_TEST_FAILED);
         }
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index a7b6888829b5..e3c42f5ac4a9 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -729,8 +729,8 @@ static void tms380tr_timer_chk(unsigned long data)
 		return;
 
 	tms380tr_chk_outstanding_cmds(dev);
-	if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies)
-		&& (tp->TplFree != tp->TplBusy))
+	if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) &&
+	   (tp->TplFree != tp->TplBusy))
 	{
 		/* Anything to send, but stalled too long */
 		tp->LastSendTime = jiffies;
@@ -830,8 +830,8 @@ irqreturn_t tms380tr_interrupt(int irq, void *dev_id)
 		}
 
 		/* Reset system interrupt if not already done. */
-		if(irq_type != STS_IRQ_TRANSMIT_STATUS
-			&& irq_type != STS_IRQ_RECEIVE_STATUS) {
+		if(irq_type != STS_IRQ_TRANSMIT_STATUS &&
+		   irq_type != STS_IRQ_RECEIVE_STATUS) {
 			tms380tr_reset_interrupt(dev);
 		}
 
@@ -895,10 +895,10 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
 
 	/* Check if this interrupt does use the SSB. */
 
-	if(IrqType != STS_IRQ_TRANSMIT_STATUS
-		&& IrqType != STS_IRQ_RECEIVE_STATUS
-		&& IrqType != STS_IRQ_COMMAND_STATUS
-		&& IrqType != STS_IRQ_RING_STATUS)
+	if(IrqType != STS_IRQ_TRANSMIT_STATUS &&
+	   IrqType != STS_IRQ_RECEIVE_STATUS &&
+	   IrqType != STS_IRQ_COMMAND_STATUS &&
+	   IrqType != STS_IRQ_RING_STATUS)
 	{
 		return (1);	/* SSB not involved. */
 	}
@@ -1364,6 +1364,8 @@ static int tms380tr_reset_adapter(struct net_device *dev)
 	return (-1);
 }
 
+MODULE_FIRMWARE("tms380tr.bin");
+
 /*
  * Starts bring up diagnostics of token ring adapter and evaluates
  * diagnostic results.
@@ -1483,8 +1485,8 @@ static int tms380tr_init_adapter(struct net_device *dev)
 			/* Mask interesting status bits */
 			Status = SIFREADW(SIFSTS);
 			Status &= STS_MASK;
-		} while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0)
-			&& ((Status & STS_ERROR) == 0) && (loop_cnt != 0));
+		} while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) &&
+			((Status & STS_ERROR) == 0) && (loop_cnt != 0));
 
 		if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0)
 		{
@@ -2181,8 +2183,8 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
 				}
 			}
 
-			if(skb && (rpl->SkbStat == SKB_DATA_COPY
-				|| rpl->SkbStat == SKB_DMA_DIRECT))
+			if(skb && (rpl->SkbStat == SKB_DATA_COPY ||
+				   rpl->SkbStat == SKB_DMA_DIRECT))
 			{
 				if(rpl->SkbStat == SKB_DATA_COPY)
 					skb_copy_to_linear_data(skb, ReceiveDataPtr,
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 7030bd5e9848..a69c4a48bab9 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -802,13 +802,11 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
 		int rx = data->rxhead;
 		struct sk_buff *skb;
 
-		data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
-							  TSI108_RXBUF_SIZE + 2);
+		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+		data->rxskbs[rx] = skb;
 		if (!skb)
 			break;
 
-		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
-
 		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
 							TSI108_RX_SKB_SIZE,
 							DMA_FROM_DEVICE);
@@ -1356,7 +1354,7 @@ static int tsi108_open(struct net_device *dev)
 	for (i = 0; i < TSI108_RXRING_LEN; i++) {
 		struct sk_buff *skb;
 
-		skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
 		if (!skb) {
 			/* Bah.  No memory for now, but maybe we'll get
 			 * some more later.
@@ -1370,8 +1368,6 @@ static int tsi108_open(struct net_device *dev)
 		}
 
 		data->rxskbs[i] = skb;
-		/* Align the payload on a 4-byte boundary */
-		skb_reserve(skb, 2);
 		data->rxskbs[i] = skb;
 		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
 		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index db7d5e11855d..9f6742fad6ca 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -209,10 +209,10 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
 			printk(KERN_DEBUG "%s:  Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
 				   dev->name, tp->csr6, ioread32(ioaddr + CSR6),
 				   ioread32(ioaddr + CSR12));
-	} else if ((tp->nwayset  &&  (csr5 & 0x08000000)
-				&& (dev->if_port == 3  ||  dev->if_port == 5)
-				&& (csr12 & 2) == 2) ||
-			   (tp->nway && (csr5 & (TPLnkFail)))) {
+	} else if ((tp->nwayset  &&  (csr5 & 0x08000000) &&
+		    (dev->if_port == 3  ||  dev->if_port == 5) &&
+		    (csr12 & 2) == 2) ||
+		   (tp->nway && (csr5 & (TPLnkFail)))) {
 		/* Link blew? Maybe restart NWay. */
 		del_timer_sync(&tp->timer);
 		t21142_start_nway(dev);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 74e5ba42d38d..d4255d44cb75 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -62,9 +62,9 @@ module_param (debug, int, 0);
 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
 
 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
-#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
-        || defined(CONFIG_SPARC) || defined(__ia64__) \
-        || defined(__sh__) || defined(__mips__)
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
+        defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
+        defined(__sh__) || defined(__mips__)
 static int rx_copybreak = 1518;
 #else
 static int rx_copybreak = 100;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a45ded0538b8..ad63621913c3 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -543,7 +543,7 @@ static int dmfe_open(struct DEVICE *dev)
 
 	DMFE_DBUG(0, "dmfe_open", 0);
 
-	ret = request_irq(dev->irq, &dmfe_interrupt,
+	ret = request_irq(dev->irq, dmfe_interrupt,
 			  IRQF_SHARED, dev->name, dev);
 	if (ret)
 		return ret;
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 391acd32a6a5..889f57aae89b 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -174,10 +174,10 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
 		}
 	  /* Do a fix-up based on the vendor half of the station address prefix. */
 	  for (i = 0; eeprom_fixups[i].name; i++) {
-		if (dev->dev_addr[0] == eeprom_fixups[i].addr0
-			&&  dev->dev_addr[1] == eeprom_fixups[i].addr1
-			&&  dev->dev_addr[2] == eeprom_fixups[i].addr2) {
-		  if (dev->dev_addr[2] == 0xE8  &&  ee_data[0x1a] == 0x55)
+		  if (dev->dev_addr[0] == eeprom_fixups[i].addr0 &&
+		      dev->dev_addr[1] == eeprom_fixups[i].addr1 &&
+		      dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+		  if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
 			  i++;			/* An Accton EN1207, not an outlaw Maxtech. */
 		  memcpy(ee_data + 26, eeprom_fixups[i].newtable,
 				 sizeof(eeprom_fixups[i].newtable));
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index c8d220cf2cce..2e8e8ee893c7 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -170,8 +170,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
 				      RxDescCollisionSeen |
 				      RxDescRunt |
 				      RxDescDescErr |
-				      RxWholePkt)) != RxWholePkt
-			   || pkt_len > 1518) {
+				      RxWholePkt)) != RxWholePkt ||
+			   pkt_len > 1518) {
 			       if ((status & (RxLengthOver2047 |
 					      RxWholePkt)) != RxWholePkt) {
                                /* Ignore earlier buffers. */
@@ -201,8 +201,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
 
                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
-                               if (pkt_len < tulip_rx_copybreak
-                                   && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                               if (pkt_len < tulip_rx_copybreak &&
+                                   (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
 								   tp->rx_buffers[entry].mapping,
@@ -395,8 +395,8 @@ static int tulip_rx(struct net_device *dev)
 			       RxDescCollisionSeen |
 			       RxDescRunt |
 			       RxDescDescErr |
-			       RxWholePkt))        != RxWholePkt
-		     || pkt_len > 1518) {
+			       RxWholePkt))        != RxWholePkt ||
+		    pkt_len > 1518) {
 			if ((status & (RxLengthOver2047 |
 			     RxWholePkt))         != RxWholePkt) {
 				/* Ignore earlier buffers. */
@@ -425,8 +425,8 @@ static int tulip_rx(struct net_device *dev)
 
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
-			if (pkt_len < tulip_rx_copybreak
-				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			if (pkt_len < tulip_rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(tp->pdev,
 							    tp->rx_buffers[entry].mapping,
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index daddfa51853e..d8fda83705bf 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -468,8 +468,8 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
 		int phy = phyn & 0x1f;
 		int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
 		if ((mii_status & 0x8301) == 0x8001 ||
-		    ((mii_status & BMSR_100BASE4) == 0
-		     && (mii_status & 0x7800) != 0)) {
+		    ((mii_status & BMSR_100BASE4) == 0 &&
+		     (mii_status & 0x7800) != 0)) {
 			/* preserve Becker logic, gain indentation level */
 		} else {
 			continue;
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index f49579128fb5..d8418694bf46 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -316,9 +316,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
 		}
 	}
 
-	if ((tp->nwayset  &&  (csr5 & 0x08000000)
-			  && (dev->if_port == 3  ||  dev->if_port == 5)
-			  && (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
+	if ((tp->nwayset  &&  (csr5 & 0x08000000) &&
+	     (dev->if_port == 3  ||  dev->if_port == 5) &&
+	     (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
 
 		/* Link blew? Maybe restart NWay. */
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 6b2330e4206e..0fa3140d65bf 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -64,9 +64,9 @@ const char * const medianame[32] = {
 };
 
 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
-#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
-	|| defined(CONFIG_SPARC) || defined(__ia64__) \
-	|| defined(__sh__) || defined(__mips__)
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
+	defined(CONFIG_SPARC) || defined(__ia64__) || \
+	defined(__sh__) || defined(__mips__)
 static int rx_copybreak = 1518;
 #else
 static int rx_copybreak = 100;
@@ -449,8 +449,8 @@ media_picked:
 			iowrite32(0x0201B078, ioaddr + 0xB8);
 			next_tick = 1*HZ;
 		}
-	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
-			   && ! tp->medialock) {
+	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
+		   ! tp->medialock) {
 		dev->if_port = 0;
 		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
@@ -506,7 +506,7 @@ tulip_open(struct net_device *dev)
 
 	tulip_init_ring (dev);
 
-	retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		goto free_ring;
 
@@ -535,9 +535,9 @@ static void tulip_tx_timeout(struct net_device *dev)
 		if (tulip_debug > 1)
 			printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
 				   dev->name);
-	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
-			   || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
-			   || tp->chip_id == DM910X) {
+	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
+		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
+		   tp->chip_id == DM910X) {
 		printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
 			   "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
 			   dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1538,8 +1538,10 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
 		}
 	}
 	/* Lite-On boards have the address byte-swapped. */
-	if ((dev->dev_addr[0] == 0xA0  ||  dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
-		&&  dev->dev_addr[1] == 0x00)
+	if ((dev->dev_addr[0] == 0xA0 ||
+	     dev->dev_addr[0] == 0xC0 ||
+	     dev->dev_addr[0] == 0x02) &&
+	    dev->dev_addr[1] == 0x00)
 		for (i = 0; i < 6; i+=2) {
 			char tmp = dev->dev_addr[i];
 			dev->dev_addr[i] = dev->dev_addr[i+1];
@@ -1782,7 +1784,7 @@ static int tulip_resume(struct pci_dev *pdev)
 		return retval;
 	}
 
-	if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
+	if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
 		printk (KERN_ERR "tulip: request_irq failed in resume\n");
 		return retval;
 	}
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index c457a0ca55ad..fa019cabc355 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -461,7 +461,7 @@ static int uli526x_open(struct net_device *dev)
 	/* Initialize ULI526X board */
 	uli526x_init(dev);
 
-	ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+	ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index b38d3b7f6e35..869a7a0005f9 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -639,7 +639,7 @@ static int netdev_open(struct net_device *dev)
 	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */
 
 	netif_device_detach(dev);
-	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i)
 		goto out_err;
 
@@ -1230,8 +1230,8 @@ static int netdev_rx(struct net_device *dev)
 #endif
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak
-				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			if (pkt_len < rx_copybreak &&
+			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 							    np->rx_skbuff[entry]->len,
@@ -1357,8 +1357,8 @@ static u32 __set_rx_mode(struct net_device *dev)
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
 			| AcceptMyPhys;
-	} else if ((dev->mc_count > multicast_filter_limit)
-			   ||  (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 0f2ca5980c3c..9924c4c7e2d6 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -458,7 +458,7 @@ static int xircom_open(struct net_device *dev)
 	int retval;
 	enter("xircom_open");
 	printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
-	retval = request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
 	if (retval) {
 		leave("xircom_open - No IRQ");
 		return retval;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4fdfa2ae5418..01e99f22210e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -44,7 +44,6 @@
 #include <linux/kernel.h>
 #include <linux/major.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/poll.h>
 #include <linux/fcntl.h>
 #include <linux/init.h>
@@ -54,6 +53,7 @@
 #include <linux/miscdevice.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
+#include <linux/compat.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -1110,8 +1110,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
 	return 0;
 }
 
-static long tun_chr_ioctl(struct file *file, unsigned int cmd,
-			  unsigned long arg)
+static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg, int ifreq_len)
 {
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun;
@@ -1121,7 +1121,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
 	int ret;
 
 	if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
-		if (copy_from_user(&ifr, argp, sizeof ifr))
+		if (copy_from_user(&ifr, argp, ifreq_len))
 			return -EFAULT;
 
 	if (cmd == TUNGETFEATURES) {
@@ -1144,7 +1144,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
 		if (ret)
 			goto unlock;
 
-		if (copy_to_user(argp, &ifr, sizeof(ifr)))
+		if (copy_to_user(argp, &ifr, ifreq_len))
 			ret = -EFAULT;
 		goto unlock;
 	}
@@ -1162,7 +1162,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
 		if (ret)
 			break;
 
-		if (copy_to_user(argp, &ifr, sizeof(ifr)))
+		if (copy_to_user(argp, &ifr, ifreq_len))
 			ret = -EFAULT;
 		break;
 
@@ -1236,7 +1236,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
 		/* Get hw address */
 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
-		if (copy_to_user(argp, &ifr, sizeof ifr))
+		if (copy_to_user(argp, &ifr, ifreq_len))
 			ret = -EFAULT;
 		break;
 
@@ -1275,6 +1275,41 @@ unlock:
 	return ret;
 }
 
+static long tun_chr_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
+}
+
+#ifdef CONFIG_COMPAT
+static long tun_chr_compat_ioctl(struct file *file,
+			 unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case TUNSETIFF:
+	case TUNGETIFF:
+	case TUNSETTXFILTER:
+	case TUNGETSNDBUF:
+	case TUNSETSNDBUF:
+	case SIOCGIFHWADDR:
+	case SIOCSIFHWADDR:
+		arg = (unsigned long)compat_ptr(arg);
+		break;
+	default:
+		arg = (compat_ulong_t)arg;
+		break;
+	}
+
+	/*
+	 * compat_ifreq is shorter than ifreq, so we must not access beyond
+	 * the end of that structure. All fields that are used in this
+	 * driver are compatible though, we don't need to convert the
+	 * contents.
+	 */
+	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
+}
+#endif /* CONFIG_COMPAT */
+
 static int tun_chr_fasync(int fd, struct file *file, int on)
 {
 	struct tun_struct *tun = tun_get(file);
@@ -1285,7 +1320,6 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
 
 	DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
 
-	lock_kernel();
 	if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
 		goto out;
 
@@ -1298,7 +1332,6 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
 		tun->flags &= ~TUN_FASYNC;
 	ret = 0;
 out:
-	unlock_kernel();
 	tun_put(tun);
 	return ret;
 }
@@ -1306,7 +1339,7 @@ out:
 static int tun_chr_open(struct inode *inode, struct file * file)
 {
 	struct tun_file *tfile;
-	cycle_kernel_lock();
+
 	DBG1(KERN_INFO "tunX: tun_chr_open\n");
 
 	tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
@@ -1359,7 +1392,10 @@ static const struct file_operations tun_fops = {
 	.write = do_sync_write,
 	.aio_write = tun_chr_aio_write,
 	.poll	= tun_chr_poll,
-	.unlocked_ioctl = tun_chr_ioctl,
+	.unlocked_ioctl	= tun_chr_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tun_chr_compat_ioctl,
+#endif
 	.open	= tun_chr_open,
 	.release = tun_chr_close,
 	.fasync = tun_chr_fasync
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5921f5bdd764..39f1fc650be6 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1769,8 +1769,8 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
 		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
 			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
 		if(csum_bits ==
-		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
-		   || csum_bits ==
+		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
+		   csum_bits ==
 		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
 			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
 		} else
@@ -2151,7 +2151,7 @@ typhoon_open(struct net_device *dev)
 		goto out_sleep;
 	}
 
-	err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
+	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
 				dev->name, dev);
 	if(err < 0)
 		goto out_sleep;
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 4469f2451a6f..9f44c99777a8 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1306,8 +1306,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len,
 				u16 __iomem *mrblr_register)
 {
 	/* max_rx_buf_len value must be a multiple of 128 */
-	if ((max_rx_buf_len == 0)
-	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
+	if ((max_rx_buf_len == 0) ||
+	    (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
 		return -EINVAL;
 
 	out_be16(mrblr_register, max_rx_buf_len);
@@ -2159,8 +2159,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	}
 
 	if ((ug_info->numStationAddresses !=
-	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
-	    && ug_info->rxExtendedFiltering) {
+	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
+	    ug_info->rxExtendedFiltering) {
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: Number of station addresses greater than 1 "
 				  "not allowed in extended parsing mode.",
@@ -2284,9 +2284,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
 
 	ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
-	    (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
-	    || (ug_info->vlanOperationNonTagged !=
-		UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
+		(ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
+		(ug_info->vlanOperationNonTagged !=
+		 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
 
 	init_default_reg_vals(&uf_regs->upsmr,
 			      &ug_regs->maccfg1, &ug_regs->maccfg2);
@@ -2987,11 +2987,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
 	    ugeth->rx_glbl_pram_offset | ug_info->riscRx;
 	if ((ug_info->largestexternallookupkeysize !=
-	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
-	    && (ug_info->largestexternallookupkeysize !=
-		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
-	    && (ug_info->largestexternallookupkeysize !=
-		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
+	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
+	    (ug_info->largestexternallookupkeysize !=
+	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
+	    (ug_info->largestexternallookupkeysize !=
+	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Invalid largest External Lookup Key Size.",
 				  __func__);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6ce7f775bb74..a516185cbc9f 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -365,8 +365,8 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
 	padlen = ((skb->len + 4) % 512) ? 0 : 4;
 
-	if ((!skb_cloned(skb))
-	    && ((headroom + tailroom) >= (4 + padlen))) {
+	if ((!skb_cloned(skb)) &&
+	    ((headroom + tailroom) >= (4 + padlen))) {
 		if ((headroom < 4) || (tailroom < padlen)) {
 			skb->data = memmove(skb->head + 4, skb->data, skb->len);
 			skb_set_tail_pointer(skb, skb->len);
@@ -541,8 +541,8 @@ static void asix_set_multicast(struct net_device *net)
 
 	if (net->flags & IFF_PROMISC) {
 		rx_ctl |= AX_RX_CTL_PRO;
-	} else if (net->flags & IFF_ALLMULTI
-		   || net->mc_count > AX_MAX_MCAST) {
+	} else if (net->flags & IFF_ALLMULTI ||
+		   net->mc_count > AX_MAX_MCAST) {
 		rx_ctl |= AX_RX_CTL_AMALL;
 	} else if (net->mc_count == 0) {
 		/* just broadcast and directed */
@@ -753,8 +753,8 @@ static void ax88172_set_multicast(struct net_device *net)
 
 	if (net->flags & IFF_PROMISC) {
 		rx_ctl |= 0x01;
-	} else if (net->flags & IFF_ALLMULTI
-		   || net->mc_count > AX_MAX_MCAST) {
+	} else if (net->flags & IFF_ALLMULTI ||
+		   net->mc_count > AX_MAX_MCAST) {
 		rx_ctl |= 0x02;
 	} else if (net->mc_count == 0) {
 		/* just broadcast and directed */
@@ -1327,7 +1327,7 @@ static const struct driver_info ax8817x_info = {
 	.status = asix_status,
 	.link_reset = ax88172_link_reset,
 	.reset = ax88172_link_reset,
-	.flags =  FLAG_ETHER,
+	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
 	.data = 0x00130103,
 };
 
@@ -1337,7 +1337,7 @@ static const struct driver_info dlink_dub_e100_info = {
 	.status = asix_status,
 	.link_reset = ax88172_link_reset,
 	.reset = ax88172_link_reset,
-	.flags =  FLAG_ETHER,
+	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
 	.data = 0x009f9d9f,
 };
 
@@ -1347,7 +1347,7 @@ static const struct driver_info netgear_fa120_info = {
 	.status = asix_status,
 	.link_reset = ax88172_link_reset,
 	.reset = ax88172_link_reset,
-	.flags =  FLAG_ETHER,
+	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
 	.data = 0x00130103,
 };
 
@@ -1357,7 +1357,7 @@ static const struct driver_info hawking_uf200_info = {
 	.status = asix_status,
 	.link_reset = ax88172_link_reset,
 	.reset = ax88172_link_reset,
-	.flags =  FLAG_ETHER,
+	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
 	.data = 0x001f1d1f,
 };
 
@@ -1367,7 +1367,7 @@ static const struct driver_info ax88772_info = {
 	.status = asix_status,
 	.link_reset = ax88772_link_reset,
 	.reset = ax88772_link_reset,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
 	.rx_fixup = asix_rx_fixup,
 	.tx_fixup = asix_tx_fixup,
 };
@@ -1378,7 +1378,7 @@ static const struct driver_info ax88178_info = {
 	.status = asix_status,
 	.link_reset = ax88178_link_reset,
 	.reset = ax88178_link_reset,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
 	.rx_fixup = asix_rx_fixup,
 	.tx_fixup = asix_tx_fixup,
 };
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 2bed6b087d16..22b87e64a810 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -436,8 +436,8 @@ static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
 			clear_bit(TX_RUNNING, &catc->flags);
 	}
 
-	if ((catc->is_f5u011 && catc->tx_ptr)
-	     || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
+	if ((catc->is_f5u011 && catc->tx_ptr) ||
+	    (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
 		netif_stop_queue(netdev);
 
 	spin_unlock_irqrestore(&catc->tx_lock, flags);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 33d5c579c5ad..6491c9c00c83 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -372,12 +372,12 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	/* Data interface has one inactive and one active setting */
 	if (data_intf->num_altsetting != 2)
 		return -EINVAL;
-	if (data_intf->altsetting[0].desc.bNumEndpoints == 0
-	 && data_intf->altsetting[1].desc.bNumEndpoints == 2)
+	if (data_intf->altsetting[0].desc.bNumEndpoints == 0 &&
+	    data_intf->altsetting[1].desc.bNumEndpoints == 2)
 		data_desc = data_intf->altsetting + 1;
 	else
-	if (data_intf->altsetting[0].desc.bNumEndpoints == 2
-	 && data_intf->altsetting[1].desc.bNumEndpoints == 0)
+	if (data_intf->altsetting[0].desc.bNumEndpoints == 2 &&
+	    data_intf->altsetting[1].desc.bNumEndpoints == 0)
 		data_desc = data_intf->altsetting;
 	else
 		return -EINVAL;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 23300656c266..c337ffc3304a 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -121,8 +121,8 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 		int	headroom = skb_headroom(skb);
 		int	tailroom = skb_tailroom(skb);
 
-		if ((tailroom >= ETH_FCS_LEN + padlen)
-				&& (headroom >= EEM_HEAD))
+		if ((tailroom >= ETH_FCS_LEN + padlen) &&
+		    (headroom >= EEM_HEAD))
 			goto done;
 
 		if ((headroom + tailroom)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e1ba160008..21e183a83b99 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -37,23 +37,23 @@
 
 static int is_rndis(struct usb_interface_descriptor *desc)
 {
-	return desc->bInterfaceClass == USB_CLASS_COMM
-		&& desc->bInterfaceSubClass == 2
-		&& desc->bInterfaceProtocol == 0xff;
+	return (desc->bInterfaceClass == USB_CLASS_COMM &&
+		desc->bInterfaceSubClass == 2 &&
+		desc->bInterfaceProtocol == 0xff);
 }
 
 static int is_activesync(struct usb_interface_descriptor *desc)
 {
-	return desc->bInterfaceClass == USB_CLASS_MISC
-		&& desc->bInterfaceSubClass == 1
-		&& desc->bInterfaceProtocol == 1;
+	return (desc->bInterfaceClass == USB_CLASS_MISC &&
+		desc->bInterfaceSubClass == 1 &&
+		desc->bInterfaceProtocol == 1);
 }
 
 static int is_wireless_rndis(struct usb_interface_descriptor *desc)
 {
-	return desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER
-		&& desc->bInterfaceSubClass == 1
-		&& desc->bInterfaceProtocol == 3;
+	return (desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER &&
+		desc->bInterfaceSubClass == 1 &&
+		desc->bInterfaceProtocol == 3);
 }
 
 #else
@@ -116,9 +116,9 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 	/* this assumes that if there's a non-RNDIS vendor variant
 	 * of cdc-acm, it'll fail RNDIS requests cleanly.
 	 */
-	rndis = is_rndis(&intf->cur_altsetting->desc)
-		|| is_activesync(&intf->cur_altsetting->desc)
-		|| is_wireless_rndis(&intf->cur_altsetting->desc);
+	rndis = (is_rndis(&intf->cur_altsetting->desc) ||
+		 is_activesync(&intf->cur_altsetting->desc) ||
+		 is_wireless_rndis(&intf->cur_altsetting->desc));
 
 	memset(info, 0, sizeof *info);
 	info->control = intf;
@@ -279,10 +279,10 @@ next_desc:
 
 		dev->status = &info->control->cur_altsetting->endpoint [0];
 		desc = &dev->status->desc;
-		if (!usb_endpoint_is_int_in(desc)
-				|| (le16_to_cpu(desc->wMaxPacketSize)
-					< sizeof(struct usb_cdc_notification))
-				|| !desc->bInterval) {
+		if (!usb_endpoint_is_int_in(desc) ||
+		    (le16_to_cpu(desc->wMaxPacketSize)
+		     < sizeof(struct usb_cdc_notification)) ||
+		    !desc->bInterval) {
 			dev_dbg(&intf->dev, "bad notification endpoint\n");
 			dev->status = NULL;
 		}
@@ -411,13 +411,28 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 	return 0;
 }
 
+static int cdc_manage_power(struct usbnet *dev, int on)
+{
+	dev->intf->needs_remote_wakeup = on;
+	return 0;
+}
+
 static const struct driver_info	cdc_info = {
 	.description =	"CDC Ethernet Device",
-	.flags =	FLAG_ETHER,
+	.flags =	FLAG_ETHER | FLAG_LINK_INTR,
 	// .check_connect = cdc_check_connect,
 	.bind =		cdc_bind,
 	.unbind =	usbnet_cdc_unbind,
 	.status =	cdc_status,
+	.manage_power =	cdc_manage_power,
+};
+
+static const struct driver_info mbm_info = {
+	.description =	"Mobile Broadband Network Device",
+	.flags =	FLAG_WWAN,
+	.bind = 	cdc_bind,
+	.unbind =	usbnet_cdc_unbind,
+	.status =	cdc_status,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -532,72 +547,72 @@ static const struct usb_device_id	products [] = {
 	/* Ericsson F3507g */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson F3507g ver. 2 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson F3607gw */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson F3607gw ver 2 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson F3607gw ver 3 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson F3307 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson F3307 ver 2 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Ericsson C3607w */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Toshiba F3507g */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Toshiba F3607gw */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Toshiba F3607gw ver 2 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Dell F3507g */
 	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Dell F3607gw */
 	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Dell F3607gw ver 2 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
-	.driver_info = (unsigned long) &cdc_info,
+	.driver_info = (unsigned long) &mbm_info,
 },
 	{ },		// END
 };
@@ -610,6 +625,8 @@ static struct usb_driver cdc_driver = {
 	.disconnect =	usbnet_disconnect,
 	.suspend =	usbnet_suspend,
 	.resume =	usbnet_resume,
+	.reset_resume =	usbnet_resume,
+	.supports_autosuspend = 1,
 };
 
 
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index a2b30a10064f..3d406f9b2f29 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -611,7 +611,7 @@ static int dm9601_link_reset(struct usbnet *dev)
 
 static const struct driver_info dm9601_info = {
 	.description	= "Davicom DM9601 USB Ethernet",
-	.flags		= FLAG_ETHER,
+	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
 	.bind		= dm9601_bind,
 	.rx_fixup	= dm9601_rx_fixup,
 	.tx_fixup	= dm9601_tx_fixup,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 43bc3fcc0d85..f78f0903b073 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -602,9 +602,9 @@ static struct hso_serial *get_serial_by_shared_int_and_type(
 	port = hso_mux_to_port(mux);
 
 	for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
-		if (serial_table[i]
-		    && (dev2ser(serial_table[i])->shared_int == shared_int)
-		    && ((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) {
+		if (serial_table[i] &&
+		    (dev2ser(serial_table[i])->shared_int == shared_int) &&
+		    ((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) {
 			return dev2ser(serial_table[i]);
 		}
 	}
@@ -846,8 +846,8 @@ static void hso_net_tx_timeout(struct net_device *net)
 	dev_warn(&net->dev, "Tx timed out.\n");
 
 	/* Tear the waiting frame off the list */
-	if (odev->mux_bulk_tx_urb
-	    && (odev->mux_bulk_tx_urb->status == -EINPROGRESS))
+	if (odev->mux_bulk_tx_urb &&
+	    (odev->mux_bulk_tx_urb->status == -EINPROGRESS))
 		usb_unlink_urb(odev->mux_bulk_tx_urb);
 
 	/* Update statistics */
@@ -1020,9 +1020,9 @@ static void read_bulk_callback(struct urb *urb)
 		u32 rest;
 		u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
 		rest = urb->actual_length % odev->in_endp->wMaxPacketSize;
-		if (((rest == 5) || (rest == 6))
-		    && !memcmp(((u8 *) urb->transfer_buffer) +
-			       urb->actual_length - 4, crc_check, 4)) {
+		if (((rest == 5) || (rest == 6)) &&
+		    !memcmp(((u8 *) urb->transfer_buffer) +
+			    urb->actual_length - 4, crc_check, 4)) {
 			urb->actual_length -= 4;
 		}
 	}
@@ -1226,9 +1226,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
 			rest =
 			    urb->actual_length %
 			    serial->in_endp->wMaxPacketSize;
-			if (((rest == 5) || (rest == 6))
-			    && !memcmp(((u8 *) urb->transfer_buffer) +
-				       urb->actual_length - 4, crc_check, 4)) {
+			if (((rest == 5) || (rest == 6)) &&
+			    !memcmp(((u8 *) urb->transfer_buffer) +
+				    urb->actual_length - 4, crc_check, 4)) {
 				urb->actual_length -= 4;
 			}
 		}
@@ -2982,8 +2982,8 @@ static int hso_probe(struct usb_interface *interface,
 
 	case HSO_INTF_BULK:
 		/* It's a regular bulk interface */
-		if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK)
-		    && !disable_net)
+		if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
+		    !disable_net)
 			hso_dev = hso_create_net_device(interface, port_spec);
 		else
 			hso_dev =
@@ -3146,8 +3146,8 @@ static void hso_free_interface(struct usb_interface *interface)
 	int i;
 
 	for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
-		if (serial_table[i]
-		    && (serial_table[i]->interface == interface)) {
+		if (serial_table[i] &&
+		    (serial_table[i]->interface == interface)) {
 			hso_dev = dev2ser(serial_table[i]);
 			spin_lock_irq(&hso_dev->serial_lock);
 			tty = tty_kref_get(hso_dev->tty);
@@ -3163,8 +3163,8 @@ static void hso_free_interface(struct usb_interface *interface)
 	}
 
 	for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
-		if (network_table[i]
-		    && (network_table[i]->interface == interface)) {
+		if (network_table[i] &&
+		    (network_table[i]->interface == interface)) {
 			struct rfkill *rfk = dev2net(network_table[i])->rfkill;
 			/* hso_stop_net_device doesn't stop the net queue since
 			 * traffic needs to start it again when suspended */
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e391ef969c28..3b80e8d2d621 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -471,16 +471,7 @@ static int kaweth_reset(struct kaweth_device *kaweth)
 	int result;
 
 	dbg("kaweth_reset(%p)", kaweth);
-	result = kaweth_control(kaweth,
-				usb_sndctrlpipe(kaweth->dev, 0),
-				USB_REQ_SET_CONFIGURATION,
-				0,
-				kaweth->dev->config[0].desc.bConfigurationValue,
-				0,
-				NULL,
-				0,
-				KAWETH_CONTROL_TIMEOUT);
-
+	result = usb_reset_configuration(kaweth->dev);
 	mdelay(10);
 
 	dbg("kaweth_reset() returns %d.",result);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 10873d96b2da..87374317f480 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -391,8 +391,8 @@ static void mcs7830_set_multicast(struct net_device *net)
 
 	if (net->flags & IFF_PROMISC) {
 		data->config |= HIF_REG_CONFIG_PROMISCIOUS;
-	} else if (net->flags & IFF_ALLMULTI
-		   || net->mc_count > MCS7830_MAX_MCAST) {
+	} else if (net->flags & IFF_ALLMULTI ||
+		   net->mc_count > MCS7830_MAX_MCAST) {
 		data->config |= HIF_REG_CONFIG_ALLMULTICAST;
 	} else if (net->mc_count == 0) {
 		/* just broadcast and directed */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index f56dec6119c3..490fa8f55424 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -114,8 +114,8 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
 	 */
 
 	/* Issue the request; xid is unique, don't bother byteswapping it */
-	if (likely(buf->msg_type != RNDIS_MSG_HALT
-			&& buf->msg_type != RNDIS_MSG_RESET)) {
+	if (likely(buf->msg_type != RNDIS_MSG_HALT &&
+		   buf->msg_type != RNDIS_MSG_RESET)) {
 		xid = dev->xid++;
 		if (!xid)
 			xid = dev->xid++;
@@ -493,9 +493,9 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		data_len = le32_to_cpu(hdr->data_len);
 
 		/* don't choke if we see oob, per-packet data, etc */
-		if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
-				|| skb->len < msg_len
-				|| (data_offset + data_len + 8) > msg_len)) {
+		if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET ||
+			     skb->len < msg_len ||
+			     (data_offset + data_len + 8) > msg_len)) {
 			dev->net->stats.rx_frame_errors++;
 			devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
 				le32_to_cpu(hdr->msg_type),
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca5ca5ae061d..035fab04c0a0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -140,8 +140,8 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
 	if (!alt || !in || !out)
 		return -EINVAL;
 
-	if (alt->desc.bAlternateSetting != 0
-			|| !(dev->driver_info->flags & FLAG_NO_SETINT)) {
+	if (alt->desc.bAlternateSetting != 0 ||
+	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
 		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
 				alt->desc.bAlternateSetting);
 		if (tmp < 0)
@@ -351,9 +351,10 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 
 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
 
-	if (netif_running (dev->net)
-			&& netif_device_present (dev->net)
-			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
+	if (netif_running (dev->net) &&
+	    netif_device_present (dev->net) &&
+	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
+	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
 		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
 		case -EPIPE:
 			usbnet_defer_kevent (dev, EVENT_RX_HALT);
@@ -391,8 +392,8 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 
 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
 {
-	if (dev->driver_info->rx_fixup
-			&& !dev->driver_info->rx_fixup (dev, skb))
+	if (dev->driver_info->rx_fixup &&
+	    !dev->driver_info->rx_fixup (dev, skb))
 		goto error;
 	// else network stack removes extra byte if we forced a short packet
 
@@ -484,8 +485,8 @@ block:
 	defer_bh(dev, skb, &dev->rxq);
 
 	if (urb) {
-		if (netif_running (dev->net)
-				&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
+		if (netif_running (dev->net) &&
+		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
 			rx_submit (dev, urb, GFP_ATOMIC);
 			return;
 		}
@@ -611,15 +612,39 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
 /*-------------------------------------------------------------------------*/
 
 // precondition: never called in_interrupt
+static void usbnet_terminate_urbs(struct usbnet *dev)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+	DECLARE_WAITQUEUE(wait, current);
+	int temp;
+
+	/* ensure there are no more active urbs */
+	add_wait_queue(&unlink_wakeup, &wait);
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	dev->wait = &unlink_wakeup;
+	temp = unlink_urbs(dev, &dev->txq) +
+		unlink_urbs(dev, &dev->rxq);
+
+	/* maybe wait for deletions to finish. */
+	while (!skb_queue_empty(&dev->rxq)
+		&& !skb_queue_empty(&dev->txq)
+		&& !skb_queue_empty(&dev->done)) {
+			schedule_timeout(UNLINK_TIMEOUT_MS);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (netif_msg_ifdown(dev))
+				devdbg(dev, "waited for %d urb completions",
+					temp);
+	}
+	set_current_state(TASK_RUNNING);
+	dev->wait = NULL;
+	remove_wait_queue(&unlink_wakeup, &wait);
+}
 
 int usbnet_stop (struct net_device *net)
 {
 	struct usbnet		*dev = netdev_priv(net);
 	struct driver_info	*info = dev->driver_info;
-	int			temp;
 	int			retval;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
-	DECLARE_WAITQUEUE (wait, current);
 
 	netif_stop_queue (net);
 
@@ -641,25 +666,8 @@ int usbnet_stop (struct net_device *net)
 				info->description);
 	}
 
-	if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) {
-		/* ensure there are no more active urbs */
-		add_wait_queue(&unlink_wakeup, &wait);
-		dev->wait = &unlink_wakeup;
-		temp = unlink_urbs(dev, &dev->txq) +
-			unlink_urbs(dev, &dev->rxq);
-
-		/* maybe wait for deletions to finish. */
-		while (!skb_queue_empty(&dev->rxq)
-				&& !skb_queue_empty(&dev->txq)
-				&& !skb_queue_empty(&dev->done)) {
-			msleep(UNLINK_TIMEOUT_MS);
-			if (netif_msg_ifdown(dev))
-				devdbg(dev, "waited for %d urb completions",
-					temp);
-		}
-		dev->wait = NULL;
-		remove_wait_queue(&unlink_wakeup, &wait);
-	}
+	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
+		usbnet_terminate_urbs(dev);
 
 	usb_kill_urb(dev->interrupt);
 
@@ -672,7 +680,10 @@ int usbnet_stop (struct net_device *net)
 	dev->flags = 0;
 	del_timer_sync (&dev->delay);
 	tasklet_kill (&dev->bh);
-	usb_autopm_put_interface(dev->intf);
+	if (info->manage_power)
+		info->manage_power(dev, 0);
+	else
+		usb_autopm_put_interface(dev->intf);
 
 	return 0;
 }
@@ -753,6 +764,12 @@ int usbnet_open (struct net_device *net)
 
 	// delay posting reads until we're fully open
 	tasklet_schedule (&dev->bh);
+	if (info->manage_power) {
+		retval = info->manage_power(dev, 1);
+		if (retval < 0)
+			goto done;
+		usb_autopm_put_interface(dev->intf);
+	}
 	return retval;
 done:
 	usb_autopm_put_interface(dev->intf);
@@ -881,11 +898,16 @@ kevent (struct work_struct *work)
 	/* usb_clear_halt() needs a thread context */
 	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
 		unlink_urbs (dev, &dev->txq);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto fail_pipe;
 		status = usb_clear_halt (dev->udev, dev->out);
-		if (status < 0
-				&& status != -EPIPE
-				&& status != -ESHUTDOWN) {
+		usb_autopm_put_interface(dev->intf);
+		if (status < 0 &&
+		    status != -EPIPE &&
+		    status != -ESHUTDOWN) {
 			if (netif_msg_tx_err (dev))
+fail_pipe:
 				deverr (dev, "can't clear tx halt, status %d",
 					status);
 		} else {
@@ -896,11 +918,16 @@ kevent (struct work_struct *work)
 	}
 	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
 		unlink_urbs (dev, &dev->rxq);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto fail_halt;
 		status = usb_clear_halt (dev->udev, dev->in);
-		if (status < 0
-				&& status != -EPIPE
-				&& status != -ESHUTDOWN) {
+		usb_autopm_put_interface(dev->intf);
+		if (status < 0 &&
+		    status != -EPIPE &&
+		    status != -ESHUTDOWN) {
 			if (netif_msg_rx_err (dev))
+fail_halt:
 				deverr (dev, "can't clear rx halt, status %d",
 					status);
 		} else {
@@ -919,7 +946,12 @@ kevent (struct work_struct *work)
 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
 		if (urb != NULL) {
 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
+			status = usb_autopm_get_interface(dev->intf);
+			if (status < 0)
+				goto fail_lowmem;
 			rx_submit (dev, urb, GFP_KERNEL);
+			usb_autopm_put_interface(dev->intf);
+fail_lowmem:
 			tasklet_schedule (&dev->bh);
 		}
 	}
@@ -929,11 +961,18 @@ kevent (struct work_struct *work)
 		int			retval = 0;
 
 		clear_bit (EVENT_LINK_RESET, &dev->flags);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto skip_reset;
 		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
+			usb_autopm_put_interface(dev->intf);
+skip_reset:
 			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
 				retval,
 				dev->udev->bus->bus_name, dev->udev->devpath,
 				info->description);
+		} else {
+			usb_autopm_put_interface(dev->intf);
 		}
 	}
 
@@ -971,6 +1010,7 @@ static void tx_complete (struct urb *urb)
 		case -EPROTO:
 		case -ETIME:
 		case -EILSEQ:
+			usb_mark_last_busy(dev->udev);
 			if (!timer_pending (&dev->delay)) {
 				mod_timer (&dev->delay,
 					jiffies + THROTTLE_JIFFIES);
@@ -987,6 +1027,7 @@ static void tx_complete (struct urb *urb)
 		}
 	}
 
+	usb_autopm_put_interface_async(dev->intf);
 	urb->dev = NULL;
 	entry->state = tx_done;
 	defer_bh(dev, skb, &dev->txq);
@@ -1057,14 +1098,34 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 		}
 	}
 
-	spin_lock_irqsave (&dev->txq.lock, flags);
+	spin_lock_irqsave(&dev->txq.lock, flags);
+	retval = usb_autopm_get_interface_async(dev->intf);
+	if (retval < 0) {
+		spin_unlock_irqrestore(&dev->txq.lock, flags);
+		goto drop;
+	}
+
+#ifdef CONFIG_PM
+	/* if this triggers, the device is still asleep */
+	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		/* transmission will be done in resume */
+		usb_anchor_urb(urb, &dev->deferred);
+		/* no use to process more packets */
+		netif_stop_queue(net);
+		spin_unlock_irqrestore(&dev->txq.lock, flags);
+		devdbg(dev, "Delaying transmission for resumption");
+		goto deferred;
+	}
+#endif
 
 	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
 	case -EPIPE:
 		netif_stop_queue (net);
 		usbnet_defer_kevent (dev, EVENT_TX_HALT);
+		usb_autopm_put_interface_async(dev->intf);
 		break;
 	default:
+		usb_autopm_put_interface_async(dev->intf);
 		if (netif_msg_tx_err (dev))
 			devdbg (dev, "tx: submit urb err %d", retval);
 		break;
@@ -1088,6 +1149,9 @@ drop:
 		devdbg (dev, "> tx, len %d, type 0x%x",
 			length, skb->protocol);
 	}
+#ifdef CONFIG_PM
+deferred:
+#endif
 	return NETDEV_TX_OK;
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
@@ -1126,10 +1190,10 @@ static void usbnet_bh (unsigned long param)
 		}
 
 	// or are we maybe short a few urbs?
-	} else if (netif_running (dev->net)
-			&& netif_device_present (dev->net)
-			&& !timer_pending (&dev->delay)
-			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
+	} else if (netif_running (dev->net) &&
+		   netif_device_present (dev->net) &&
+		   !timer_pending (&dev->delay) &&
+		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
 		int	temp = dev->rxq.qlen;
 		int	qlen = RX_QLEN (dev);
 
@@ -1210,6 +1274,14 @@ static const struct net_device_ops usbnet_netdev_ops = {
 
 // precondition: never called in_interrupt
 
+static struct device_type wlan_type = {
+	.name	= "wlan",
+};
+
+static struct device_type wwan_type = {
+	.name	= "wwan",
+};
+
 int
 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 {
@@ -1255,6 +1327,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	dev->bh.func = usbnet_bh;
 	dev->bh.data = (unsigned long) dev;
 	INIT_WORK (&dev->kevent, kevent);
+	init_usb_anchor(&dev->deferred);
 	dev->delay.function = usbnet_bh;
 	dev->delay.data = (unsigned long) dev;
 	init_timer (&dev->delay);
@@ -1289,12 +1362,15 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		// heuristic:  "usb%d" for links we know are two-host,
 		// else "eth%d" when there's reasonable doubt.  userspace
 		// can rename the link if it knows better.
-		if ((dev->driver_info->flags & FLAG_ETHER) != 0
-				&& (net->dev_addr [0] & 0x02) == 0)
+		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
+		    (net->dev_addr [0] & 0x02) == 0)
 			strcpy (net->name, "eth%d");
 		/* WLAN devices should always be named "wlan%d" */
 		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
 			strcpy(net->name, "wlan%d");
+		/* WWAN devices should always be named "wwan%d" */
+		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
+			strcpy(net->name, "wwan%d");
 
 		/* maybe the remote can't receive an Ethernet MTU */
 		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
@@ -1322,6 +1398,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
 
 	SET_NETDEV_DEV(net, &udev->dev);
+
+	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+		SET_NETDEV_DEVTYPE(net, &wlan_type);
+	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
+		SET_NETDEV_DEVTYPE(net, &wwan_type);
+
 	status = register_netdev (net);
 	if (status)
 		goto out3;
@@ -1335,9 +1417,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	// ok, it's ready to go.
 	usb_set_intfdata (udev, dev);
 
-	// start as if the link is up
 	netif_device_attach (net);
 
+	if (dev->driver_info->flags & FLAG_LINK_INTR)
+		netif_carrier_off(net);
+
 	return 0;
 
 out3:
@@ -1363,13 +1447,23 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
 	struct usbnet		*dev = usb_get_intfdata(intf);
 
 	if (!dev->suspend_count++) {
+		spin_lock_irq(&dev->txq.lock);
+		/* don't autosuspend while transmitting */
+		if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
+			spin_unlock_irq(&dev->txq.lock);
+			return -EBUSY;
+		} else {
+			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+			spin_unlock_irq(&dev->txq.lock);
+		}
 		/*
 		 * accelerate emptying of the rx and tx queues, to avoid
 		 * having everything error out.
 		 */
 		netif_device_detach (dev->net);
-		(void) unlink_urbs (dev, &dev->rxq);
-		(void) unlink_urbs (dev, &dev->txq);
+		usbnet_terminate_urbs(dev);
+		usb_kill_urb(dev->interrupt);
+
 		/*
 		 * reattach so runtime management can use and
 		 * wake the device
@@ -1383,10 +1477,34 @@ EXPORT_SYMBOL_GPL(usbnet_suspend);
 int usbnet_resume (struct usb_interface *intf)
 {
 	struct usbnet		*dev = usb_get_intfdata(intf);
+	struct sk_buff          *skb;
+	struct urb              *res;
+	int                     retval;
+
+	if (!--dev->suspend_count) {
+		spin_lock_irq(&dev->txq.lock);
+		while ((res = usb_get_from_anchor(&dev->deferred))) {
+
+			printk(KERN_INFO"%s has delayed data\n", __func__);
+			skb = (struct sk_buff *)res->context;
+			retval = usb_submit_urb(res, GFP_ATOMIC);
+			if (retval < 0) {
+				dev_kfree_skb_any(skb);
+				usb_free_urb(res);
+				usb_autopm_put_interface_async(dev->intf);
+			} else {
+				dev->net->trans_start = jiffies;
+				__skb_queue_tail(&dev->txq, skb);
+			}
+		}
 
-	if (!--dev->suspend_count)
+		smp_mb();
+		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+		spin_unlock_irq(&dev->txq.lock);
+		if (!(dev->txq.qlen >= TX_QLEN(dev)))
+			netif_start_queue(dev->net);
 		tasklet_schedule (&dev->bh);
-
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_resume);
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 04882c8f9bf1..3eb0b167b5b4 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -174,8 +174,8 @@ static int blan_mdlm_bind(struct usbnet *dev, struct usb_interface *intf)
 				goto bad_desc;
 			}
 			/* expect bcdVersion 1.0, ignore */
-			if (memcmp(&desc->bGUID, blan_guid, 16)
-				    && memcmp(&desc->bGUID, safe_guid, 16) ) {
+			if (memcmp(&desc->bGUID, blan_guid, 16) &&
+			    memcmp(&desc->bGUID, safe_guid, 16)) {
 				/* hey, this one might _really_ be MDLM! */
 				dev_dbg(&intf->dev, "MDLM guid\n");
 				goto bad_desc;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 52af5017c46b..63099c58a6dd 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -155,8 +155,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct veth_net_stats *stats, *rcv_stats;
 	int length, cpu;
 
-	skb_orphan(skb);
-
 	priv = netdev_priv(dev);
 	rcv = priv->peer;
 	rcv_priv = netdev_priv(rcv);
@@ -168,20 +166,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!(rcv->flags & IFF_UP))
 		goto tx_drop;
 
-	if (skb->len > (rcv->mtu + MTU_PAD))
-		goto rx_drop;
-
-        skb->tstamp.tv64 = 0;
-	skb->pkt_type = PACKET_HOST;
-	skb->protocol = eth_type_trans(skb, rcv);
 	if (dev->features & NETIF_F_NO_CSUM)
 		skb->ip_summed = rcv_priv->ip_summed;
 
-	skb->mark = 0;
-	secpath_reset(skb);
-	nf_reset(skb);
-
-	length = skb->len;
+	length = skb->len + ETH_HLEN;
+	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
+		goto rx_drop;
 
 	stats->tx_bytes += length;
 	stats->tx_packets++;
@@ -189,7 +179,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 	rcv_stats->rx_bytes += length;
 	rcv_stats->rx_packets++;
 
-	netif_rx(skb);
 	return NETDEV_TX_OK;
 
 tx_drop:
@@ -337,7 +326,7 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
 
 static struct rtnl_link_ops veth_link_ops;
 
-static int veth_newlink(struct net_device *dev,
+static int veth_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[])
 {
 	int err;
@@ -345,6 +334,7 @@ static int veth_newlink(struct net_device *dev,
 	struct veth_priv *priv;
 	char ifname[IFNAMSIZ];
 	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
+	struct net *net;
 
 	/*
 	 * create and register peer first
@@ -377,14 +367,22 @@ static int veth_newlink(struct net_device *dev,
 	else
 		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
 
-	peer = rtnl_create_link(dev_net(dev), ifname, &veth_link_ops, tbp);
-	if (IS_ERR(peer))
+	net = rtnl_link_get_net(src_net, tbp);
+	if (IS_ERR(net))
+		return PTR_ERR(net);
+
+	peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
+	if (IS_ERR(peer)) {
+		put_net(net);
 		return PTR_ERR(peer);
+	}
 
 	if (tbp[IFLA_ADDRESS] == NULL)
 		random_ether_addr(peer->dev_addr);
 
 	err = register_netdevice(peer);
+	put_net(net);
+	net = NULL;
 	if (err < 0)
 		goto err_register_peer;
 
@@ -439,7 +437,7 @@ err_register_peer:
 	return err;
 }
 
-static void veth_dellink(struct net_device *dev)
+static void veth_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct veth_priv *priv;
 	struct net_device *peer;
@@ -447,8 +445,8 @@ static void veth_dellink(struct net_device *dev)
 	priv = netdev_priv(dev);
 	peer = priv->peer;
 
-	unregister_netdevice(dev);
-	unregister_netdevice(peer);
+	unregister_netdevice_queue(dev, head);
+	unregister_netdevice_queue(peer, head);
 }
 
 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 1fd70583be44..593e01f64e9b 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -42,9 +42,9 @@ static int max_interrupt_work = 20;
 
 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
    Setting to > 1518 effectively disables this feature. */
-#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
-       || defined(CONFIG_SPARC) || defined(__ia64__) \
-       || defined(__sh__) || defined(__mips__)
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
+	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
+	defined(__sh__) || defined(__mips__)
 static int rx_copybreak = 1518;
 #else
 static int rx_copybreak;
@@ -1150,7 +1150,7 @@ static int rhine_open(struct net_device *dev)
 	void __iomem *ioaddr = rp->base;
 	int rc;
 
-	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
+	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
 			dev);
 	if (rc)
 		return rc;
@@ -1484,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit)
 				}
 			}
 		} else {
-			struct sk_buff *skb;
+			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak &&
-				(skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
-				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
+			if (pkt_len < rx_copybreak)
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+			if (skb) {
 				pci_dma_sync_single_for_cpu(rp->pdev,
 							    rp->rx_skbuff_dma[entry],
 							    rp->rx_buf_sz,
@@ -1683,8 +1683,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		rx_mode = 0x1C;
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5bee005c..4ceb441f2687 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -9,7 +9,6 @@
  *
  * TODO
  *	rx_copybreak/alignment
- *	Scatter gather
  *	More testing
  *
  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
@@ -275,7 +274,7 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 
 #define DMA_LENGTH_MIN  0
 #define DMA_LENGTH_MAX  7
-#define DMA_LENGTH_DEF  0
+#define DMA_LENGTH_DEF  6
 
 /* DMA_length[] is used for controlling the DMA length
    0: 8 DWORDs
@@ -298,14 +297,6 @@ VELOCITY_PARAM(DMA_length, "DMA length");
 */
 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
 
-#define TX_CSUM_DEF     1
-/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
-   (We only support RX checksum offload now)
-   0: disable csum_offload[checksum offload
-   1: enable checksum offload. (Default)
-*/
-VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
-
 #define FLOW_CNTL_DEF   1
 #define FLOW_CNTL_MIN   1
 #define FLOW_CNTL_MAX   5
@@ -354,21 +345,10 @@ VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
 */
 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
 
-#define INT_WORKS_DEF   20
-#define INT_WORKS_MIN   10
-#define INT_WORKS_MAX   64
-
-VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
-
 static int rx_copybreak = 200;
 module_param(rx_copybreak, int, 0644);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 
-#ifdef CONFIG_PM
-static DEFINE_SPINLOCK(velocity_dev_list_lock);
-static LIST_HEAD(velocity_dev_list);
-#endif
-
 /*
  *	Internal board variants. At the moment we have only one
  */
@@ -417,14 +397,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct velocity_info *vptr = netdev_priv(dev);
 
-#ifdef CONFIG_PM
-	unsigned long flags;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	if (!list_empty(&velocity_dev_list))
-		list_del(&vptr->list);
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-#endif
 	unregister_netdev(dev);
 	iounmap(vptr->mac_regs);
 	pci_release_regions(pdev);
@@ -510,13 +482,11 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
 
-	velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
 	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
 	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
-	velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
 	opts->numrx = (opts->numrx & ~3);
 }
 
@@ -925,8 +895,8 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
 
 	/*
 	   Check if new status is consistent with current status
-	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
-	   || (mii_status==curr_status)) {
+	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
+	       (mii_status==curr_status)) {
 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
 	   vptr->mii_status=check_connection_type(vptr->mac_regs);
 	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
@@ -1162,8 +1132,8 @@ static void velocity_set_multi(struct net_device *dev)
 		writel(0xffffffff, &regs->MARCAM[0]);
 		writel(0xffffffff, &regs->MARCAM[4]);
 		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
-	} else if ((dev->mc_count > vptr->multicast_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > vptr->multicast_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		writel(0xffffffff, &regs->MARCAM[0]);
 		writel(0xffffffff, &regs->MARCAM[4]);
 		rx_mode = (RCR_AM | RCR_AB);
@@ -1259,6 +1229,66 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
 	}
 }
 
+/**
+ * setup_queue_timers	-	Setup interrupt timers
+ *
+ * Setup interrupt frequency during suppression (timeout if the frame
+ * count isn't filled).
+ */
+static void setup_queue_timers(struct velocity_info *vptr)
+{
+	/* Only for newer revisions */
+	if (vptr->rev_id >= REV_ID_VT3216_A0) {
+		u8 txqueue_timer = 0;
+		u8 rxqueue_timer = 0;
+
+		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
+				VELOCITY_SPEED_100)) {
+			txqueue_timer = vptr->options.txqueue_timer;
+			rxqueue_timer = vptr->options.rxqueue_timer;
+		}
+
+		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
+		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
+	}
+}
+/**
+ * setup_adaptive_interrupts  -  Setup interrupt suppression
+ *
+ * @vptr velocity adapter
+ *
+ * The velocity is able to suppress interrupts during high interrupt load.
+ * This function turns on that feature.
+ */
+static void setup_adaptive_interrupts(struct velocity_info *vptr)
+{
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+	u16 tx_intsup = vptr->options.tx_intsup;
+	u16 rx_intsup = vptr->options.rx_intsup;
+
+	/* Setup default interrupt mask (will be changed below) */
+	vptr->int_mask = INT_MASK_DEF;
+
+	/* Set Tx Interrupt Suppression Threshold */
+	writeb(CAMCR_PS0, &regs->CAMCR);
+	if (tx_intsup != 0) {
+		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
+				ISR_PTX2I | ISR_PTX3I);
+		writew(tx_intsup, &regs->ISRCTL);
+	} else
+		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
+
+	/* Set Rx Interrupt Suppression Threshold */
+	writeb(CAMCR_PS1, &regs->CAMCR);
+	if (rx_intsup != 0) {
+		vptr->int_mask &= ~ISR_PRXI;
+		writew(rx_intsup, &regs->ISRCTL);
+	} else
+		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
+
+	/* Select page to interrupt hold timer */
+	writeb(0, &regs->CAMCR);
+}
 
 /**
  *	velocity_init_registers	-	initialise MAC registers
@@ -1345,7 +1375,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
 		 */
 		enable_mii_autopoll(regs);
 
-		vptr->int_mask = INT_MASK_DEF;
+		setup_adaptive_interrupts(vptr);
 
 		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 		writew(vptr->options.numrx - 1, &regs->RDCSize);
@@ -1483,7 +1513,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	Do the gymnastics to get the buffer head for data at
 	 *	64byte alignment.
 	 */
-	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
+	skb_reserve(rd_info->skb,
+			64 - ((unsigned long) rd_info->skb->data & 63));
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
 					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
@@ -1602,12 +1633,10 @@ out:
  */
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	dma_addr_t curr;
 	int j;
 
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->tx.numq; j++) {
-		curr = vptr->tx.pool_dma[j];
 
 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 					    sizeof(struct velocity_td_info),
@@ -1673,21 +1702,27 @@ err_free_dma_rings_0:
  *	Release an transmit buffer. If the buffer was preallocated then
  *	recycle it, if not then unmap the buffer.
  */
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+static void velocity_free_tx_buf(struct velocity_info *vptr,
+		struct velocity_td_info *tdinfo, struct tx_desc *td)
 {
 	struct sk_buff *skb = tdinfo->skb;
-	int i;
-	int pktlen;
 
 	/*
 	 *	Don't unmap the pre-allocated tx_bufs
 	 */
 	if (tdinfo->skb_dma) {
+		int i;
 
-		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
-			tdinfo->skb_dma[i] = 0;
+			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+
+			/* For scatter-gather */
+			if (skb_shinfo(skb)->nr_frags > 0)
+				pktlen = max_t(size_t, pktlen,
+						le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
+
+			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+					pktlen, PCI_DMA_TODEVICE);
 		}
 	}
 	dev_kfree_skb_irq(skb);
@@ -1801,6 +1836,8 @@ static void velocity_error(struct velocity_info *vptr, int status)
 				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
 			else
 				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+			setup_queue_timers(vptr);
 		}
 		/*
 		 *	Get link status from PHYSR0
@@ -1887,7 +1924,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 				stats->tx_packets++;
 				stats->tx_bytes += tdinfo->skb->len;
 			}
-			velocity_free_tx_buf(vptr, tdinfo);
+			velocity_free_tx_buf(vptr, tdinfo, td);
 			vptr->tx.used[qnum]--;
 		}
 		vptr->tx.tail[qnum] = idx;
@@ -1899,8 +1936,8 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 	 *	Look to see if we should kick the transmit network
 	 *	layer for more work.
 	 */
-	if (netif_queue_stopped(vptr->dev) && (full == 0)
-	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
 		netif_wake_queue(vptr->dev);
 	}
 	return works;
@@ -1949,10 +1986,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
 		if (new_skb) {
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-			skb_reserve(new_skb, 2);
 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
@@ -2060,13 +2096,14 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
  *	any received packets from the receive queue. Hand the ring
  *	slots back to the adapter for reuse.
  */
-static int velocity_rx_srv(struct velocity_info *vptr, int status)
+static int velocity_rx_srv(struct velocity_info *vptr, int status,
+		int budget_left)
 {
 	struct net_device_stats *stats = &vptr->dev->stats;
 	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
-	do {
+	while (works < budget_left) {
 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
 		if (!vptr->rx.info[rd_curr].skb)
@@ -2097,7 +2134,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 		rd_curr++;
 		if (rd_curr >= vptr->options.numrx)
 			rd_curr = 0;
-	} while (++works <= 15);
+		works++;
+	}
 
 	vptr->rx.curr = rd_curr;
 
@@ -2108,6 +2146,40 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 	return works;
 }
 
+static int velocity_poll(struct napi_struct *napi, int budget)
+{
+	struct velocity_info *vptr = container_of(napi,
+			struct velocity_info, napi);
+	unsigned int rx_done;
+	u32 isr_status;
+
+	spin_lock(&vptr->lock);
+	isr_status = mac_read_isr(vptr->mac_regs);
+
+	/* Ack the interrupt */
+	mac_write_isr(vptr->mac_regs, isr_status);
+	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+		velocity_error(vptr, isr_status);
+
+	/*
+	 * Do rx and tx twice for performance (taken from the VIA
+	 * out-of-tree driver).
+	 */
+	rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
+	velocity_tx_srv(vptr, isr_status);
+	rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
+	velocity_tx_srv(vptr, isr_status);
+
+	spin_unlock(&vptr->lock);
+
+	/* If budget not fully consumed, exit the polling mode */
+	if (rx_done < budget) {
+		napi_complete(napi);
+		mac_enable_int(vptr->mac_regs);
+	}
+
+	return rx_done;
+}
 
 /**
  *	velocity_intr		-	interrupt callback
@@ -2124,8 +2196,6 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
 	struct net_device *dev = dev_instance;
 	struct velocity_info *vptr = netdev_priv(dev);
 	u32 isr_status;
-	int max_count = 0;
-
 
 	spin_lock(&vptr->lock);
 	isr_status = mac_read_isr(vptr->mac_regs);
@@ -2136,32 +2206,13 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
 		return IRQ_NONE;
 	}
 
-	mac_disable_int(vptr->mac_regs);
-
-	/*
-	 *	Keep processing the ISR until we have completed
-	 *	processing and the isr_status becomes zero
-	 */
-
-	while (isr_status != 0) {
-		mac_write_isr(vptr->mac_regs, isr_status);
-		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
-			velocity_error(vptr, isr_status);
-		if (isr_status & (ISR_PRXI | ISR_PPRXI))
-			max_count += velocity_rx_srv(vptr, isr_status);
-		if (isr_status & (ISR_PTXI | ISR_PPTXI))
-			max_count += velocity_tx_srv(vptr, isr_status);
-		isr_status = mac_read_isr(vptr->mac_regs);
-		if (max_count > vptr->options.int_works) {
-			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
-				dev->name);
-			max_count = 0;
-		}
+	if (likely(napi_schedule_prep(&vptr->napi))) {
+		mac_disable_int(vptr->mac_regs);
+		__napi_schedule(&vptr->napi);
 	}
 	spin_unlock(&vptr->lock);
-	mac_enable_int(vptr->mac_regs);
-	return IRQ_HANDLED;
 
+	return IRQ_HANDLED;
 }
 
 /**
@@ -2190,7 +2241,7 @@ static int velocity_open(struct net_device *dev)
 
 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
-	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
+	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
 			  dev->name, dev);
 	if (ret < 0) {
 		/* Power down the chip */
@@ -2201,6 +2252,7 @@ static int velocity_open(struct net_device *dev)
 
 	mac_enable_int(vptr->mac_regs);
 	netif_start_queue(dev);
+	napi_enable(&vptr->napi);
 	vptr->flags |= VELOCITY_FLAGS_OPENED;
 out:
 	return ret;
@@ -2436,6 +2488,7 @@ static int velocity_close(struct net_device *dev)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 
+	napi_disable(&vptr->napi);
 	netif_stop_queue(dev);
 	velocity_shutdown(vptr);
 
@@ -2470,14 +2523,22 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
 	int pktlen;
-	__le16 len;
-	int index;
+	int index, prev;
+	int i = 0;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		goto out;
-	pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 
-	len = cpu_to_le16(pktlen);
+	/* The hardware can handle at most 7 memory segments, so merge
+	 * the skb if there are more */
+	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
+			max_t(unsigned int, skb->len, ETH_ZLEN) :
+			skb_headlen(skb);
 
 	spin_lock_irqsave(&vptr->lock, flags);
 
@@ -2494,11 +2555,24 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	 */
 	tdinfo->skb = skb;
 	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-	td_ptr->tdesc0.len = len;
+	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 	td_ptr->td_buf[0].pa_high = 0;
-	td_ptr->td_buf[0].size = len;
-	tdinfo->nskb_dma = 1;
+	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
+
+	/* Handle fragments */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
+				frag->page_offset, frag->size,
+				PCI_DMA_TODEVICE);
+
+		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+		td_ptr->td_buf[i + 1].pa_high = 0;
+		td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+	}
+	tdinfo->nskb_dma = i + 1;
 
 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
@@ -2510,8 +2584,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	/*
 	 *	Handle hardware checksum
 	 */
-	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-				 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+	if ((dev->features & NETIF_F_IP_CSUM) &&
+	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
 		const struct iphdr *ip = ip_hdr(skb);
 		if (ip->protocol == IPPROTO_TCP)
 			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
@@ -2519,23 +2593,21 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
 	}
-	{
 
-		int prev = index - 1;
+	prev = index - 1;
+	if (prev < 0)
+		prev = vptr->options.numtx - 1;
+	td_ptr->tdesc0.len |= OWNED_BY_NIC;
+	vptr->tx.used[qnum]++;
+	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
-		if (prev < 0)
-			prev = vptr->options.numtx - 1;
-		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->tx.used[qnum]++;
-		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+	if (AVAIL_TD(vptr, qnum) < 1)
+		netif_stop_queue(dev);
 
-		if (AVAIL_TD(vptr, qnum) < 1)
-			netif_stop_queue(dev);
+	td_ptr = &(vptr->tx.rings[qnum][prev]);
+	td_ptr->td_buf[0].size |= TD_QUEUE;
+	mac_tx_queue_wake(vptr->mac_regs, qnum);
 
-		td_ptr = &(vptr->tx.rings[qnum][prev]);
-		td_ptr->td_buf[0].size |= TD_QUEUE;
-		mac_tx_queue_wake(vptr->mac_regs, qnum);
-	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
 out:
@@ -2578,7 +2650,6 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
 	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
-	INIT_LIST_HEAD(&vptr->list);
 }
 
 /**
@@ -2755,12 +2826,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	dev->irq = pdev->irq;
 	dev->netdev_ops = &velocity_netdev_ops;
 	dev->ethtool_ops = &velocity_ethtool_ops;
+	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-		NETIF_F_HW_VLAN_RX;
-
-	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-		dev->features |= NETIF_F_IP_CSUM;
+		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
 	ret = register_netdev(dev);
 	if (ret < 0)
@@ -2777,15 +2846,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	/* and leave the chip powered down */
 
 	pci_set_power_state(pdev, PCI_D3hot);
-#ifdef CONFIG_PM
-	{
-		unsigned long flags;
-
-		spin_lock_irqsave(&velocity_dev_list_lock, flags);
-		list_add(&vptr->list, &velocity_dev_list);
-		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-	}
-#endif
 	velocity_nics++;
 out:
 	return ret;
@@ -3222,15 +3282,114 @@ static void velocity_set_msglevel(struct net_device *dev, u32 value)
 	 msglevel = value;
 }
 
+static int get_pending_timer_val(int val)
+{
+	int mult_bits = val >> 6;
+	int mult = 1;
+
+	switch (mult_bits) {
+	case 1:
+		mult = 4;
+		break;
+	case 2:
+		mult = 16;
+		break;
+	case 3:
+		mult = 64;
+		break;
+	case 0:
+	default:
+		break;
+	}
+
+	return (val & 0x3f) * mult;
+}
+
+static void set_pending_timer_val(int *val, u32 us)
+{
+	u8 mult = 0;
+	u8 shift = 0;
+
+	if (us >= 0x3f) {
+		mult = 1; /* mult with 4 */
+		shift = 2;
+	}
+	if (us >= 0x3f * 4) {
+		mult = 2; /* mult with 16 */
+		shift = 4;
+	}
+	if (us >= 0x3f * 16) {
+		mult = 3; /* mult with 64 */
+		shift = 6;
+	}
+
+	*val = (mult << 6) | ((us >> shift) & 0x3f);
+}
+
+static int velocity_get_coalesce(struct net_device *dev,
+		struct ethtool_coalesce *ecmd)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+
+	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
+	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
+
+	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
+	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
+
+	return 0;
+}
+
+static int velocity_set_coalesce(struct net_device *dev,
+		struct ethtool_coalesce *ecmd)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+	int max_us = 0x3f * 64;
+
+	/* 6 bits of timer value, multiplied by at most 64 */
+	if (ecmd->tx_coalesce_usecs > max_us)
+		return -EINVAL;
+	if (ecmd->rx_coalesce_usecs > max_us)
+		return -EINVAL;
+
+	if (ecmd->tx_max_coalesced_frames > 0xff)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames > 0xff)
+		return -EINVAL;
+
+	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
+	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
+
+	set_pending_timer_val(&vptr->options.rxqueue_timer,
+			ecmd->rx_coalesce_usecs);
+	set_pending_timer_val(&vptr->options.txqueue_timer,
+			ecmd->tx_coalesce_usecs);
+
+	/* Setup the interrupt suppression and queue timers */
+	mac_disable_int(vptr->mac_regs);
+	setup_adaptive_interrupts(vptr);
+	setup_queue_timers(vptr);
+
+	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
+	mac_clear_isr(vptr->mac_regs);
+	mac_enable_int(vptr->mac_regs);
+
+	return 0;
+}
+
 static const struct ethtool_ops velocity_ethtool_ops = {
 	.get_settings	=	velocity_get_settings,
 	.set_settings	=	velocity_set_settings,
 	.get_drvinfo	=	velocity_get_drvinfo,
+	.set_tx_csum	=	ethtool_op_set_tx_csum,
+	.get_tx_csum	=	ethtool_op_get_tx_csum,
 	.get_wol	=	velocity_ethtool_get_wol,
 	.set_wol	=	velocity_ethtool_set_wol,
 	.get_msglevel	=	velocity_get_msglevel,
 	.set_msglevel	=	velocity_set_msglevel,
+	.set_sg		=	ethtool_op_set_sg,
 	.get_link	=	velocity_get_link,
+	.get_coalesce	=	velocity_get_coalesce,
+	.set_coalesce	=	velocity_set_coalesce,
 	.begin		=	velocity_ethtool_up,
 	.complete	=	velocity_ethtool_down
 };
@@ -3241,20 +3400,10 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
 {
 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
 	struct net_device *dev = ifa->ifa_dev->dev;
-	struct velocity_info *vptr;
-	unsigned long flags;
 
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	list_for_each_entry(vptr, &velocity_dev_list, list) {
-		if (vptr->dev == dev) {
-			velocity_get_ip(vptr);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+	if (dev_net(dev) == &init_net &&
+	    dev->netdev_ops == &velocity_netdev_ops)
+		velocity_get_ip(netdev_priv(dev));
 
 	return NOTIFY_DONE;
 }
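
A rough standalone illustration of the coalescing encoding used by the
get_pending_timer_val()/set_pending_timer_val() helpers added above: the
register value appears to hold a 6-bit count in its low bits and a 2-bit
multiplier (1, 4, 16 or 64) in bits 7:6. The userspace sketch below simply
round-trips microsecond values through that format; it is not part of the
patch and the helper names are made up.

#include <stdio.h>
#include <stdint.h>

/* Decode: low 6 bits are the count, bits 7:6 select a 1/4/16/64 multiplier. */
static unsigned int pending_timer_to_us(uint8_t val)
{
	static const unsigned int mult[4] = { 1, 4, 16, 64 };

	return (val & 0x3f) * mult[val >> 6];
}

/* Encode: pick the smallest multiplier whose 6-bit count still covers 'us'. */
static uint8_t us_to_pending_timer(unsigned int us)
{
	uint8_t mult = 0, shift = 0;

	if (us >= 0x3f) {
		mult = 1;	/* x4 */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2;	/* x16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3;	/* x64 */
		shift = 6;
	}
	return (mult << 6) | ((us >> shift) & 0x3f);
}

int main(void)
{
	unsigned int us;

	for (us = 0; us <= 0x3f * 64; us += 500) {
		uint8_t reg = us_to_pending_timer(us);

		printf("%4u us -> reg 0x%02x -> %4u us\n",
		       us, reg, pending_timer_to_us(reg));
	}
	return 0;
}
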
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 2f00c13ab502..ef4a0f64ba16 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -29,9 +29,10 @@
 
 #define VELOCITY_NAME          "via-velocity"
 #define VELOCITY_FULL_DRV_NAM  "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
-#define VELOCITY_VERSION       "1.14"
+#define VELOCITY_VERSION       "1.15"
 
 #define VELOCITY_IO_SIZE	256
+#define VELOCITY_NAPI_WEIGHT	64
 
 #define PKT_BUF_SZ          1540
 
@@ -1005,7 +1006,8 @@ struct mac_regs {
 
 	volatile __le32 RDBaseLo;	/* 0x38 */
 	volatile __le16 RDIdx;		/* 0x3C */
-	volatile __le16 reserved_3E;
+	volatile u8 TQETMR;		/* 0x3E, VT3216 and above only */
+	volatile u8 RQETMR;		/* 0x3F, VT3216 and above only */
 
 	volatile __le32 TDBaseLo[4];	/* 0x40 */
 
@@ -1421,7 +1423,6 @@ enum velocity_msg_level {
  */
 
 #define     VELOCITY_FLAGS_TAGGING         0x00000001UL
-#define     VELOCITY_FLAGS_TX_CSUM         0x00000002UL
 #define     VELOCITY_FLAGS_RX_CSUM         0x00000004UL
 #define     VELOCITY_FLAGS_IP_ALIGN        0x00000008UL
 #define     VELOCITY_FLAGS_VAL_PKT_LEN     0x00000010UL
@@ -1491,6 +1492,10 @@ struct velocity_opt {
 	int rx_bandwidth_hi;
 	int rx_bandwidth_lo;
 	int rx_bandwidth_en;
+	int rxqueue_timer;
+	int txqueue_timer;
+	int tx_intsup;
+	int rx_intsup;
 	u32 flags;
 };
 
@@ -1499,8 +1504,6 @@ struct velocity_opt {
 #define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
 
 struct velocity_info {
-	struct list_head list;
-
 	struct pci_dev *pdev;
 	struct net_device *dev;
 
@@ -1559,6 +1562,8 @@ struct velocity_info {
 	u32 ticks;
 
 	u8 rev_id;
+
+	struct napi_struct napi;
 };
 
 /**
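
The velocity_alloc_rx_buf() hunk earlier in this patch changes the receive
buffer alignment from skb_reserve(skb, data & 63) to
skb_reserve(skb, 64 - (data & 63)): the new expression moves the data
pointer up to the next 64-byte boundary (reserving a full 64 bytes when it
is already aligned), while the old one did not align the pointer at all.
A minimal standalone sketch of that arithmetic, with a hypothetical helper
name:

#include <stdio.h>
#include <stdint.h>

/* Bytes to reserve so that (addr + reserve) is 64-byte aligned, mirroring
 * the skb_reserve() expression used by velocity_alloc_rx_buf(). */
static unsigned long reserve_for_64b_alignment(uintptr_t addr)
{
	return 64 - (addr & 63);
}

int main(void)
{
	uintptr_t addr;

	for (addr = 0x1000; addr < 0x1000 + 4; addr++) {
		unsigned long res = reserve_for_64b_alignment(addr);

		printf("addr 0x%lx: reserve %2lu -> 0x%lx (mod 64 = %lu)\n",
		       (unsigned long)addr, res, (unsigned long)(addr + res),
		       (unsigned long)((addr + res) & 63));
	}
	return 0;
}
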
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b9e002fccbca..c708ecc3cb2e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -282,13 +282,12 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 	do {
 		struct skb_vnet_hdr *hdr;
 
-		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
 		if (unlikely(!skb)) {
 			oom = true;
 			break;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
@@ -343,14 +342,12 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	do {
 		skb_frag_t *f;
 
-		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
 		if (unlikely(!skb)) {
 			oom = true;
 			break;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		f = &skb_shinfo(skb)->frags[0];
 		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
@@ -431,8 +428,8 @@ again:
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete(napi);
-		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-		    && napi_schedule_prep(napi)) {
+		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+		    napi_schedule_prep(napi)) {
 			vi->rvq->vq_ops->disable_cb(vi->rvq);
 			__napi_schedule(napi);
 			goto again;
@@ -893,9 +890,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
-	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
-	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
-	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
 		vi->big_packets = true;
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
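
Several hunks above (velocity_rx_copy() and the virtio_net receive paths)
switch to netdev_alloc_skb_ip_align(), which, as the removed lines show, is
shorthand for allocating len + NET_IP_ALIGN bytes and then reserving
NET_IP_ALIGN of them. The tiny sketch below, assuming the usual
NET_IP_ALIGN value of 2, shows why that pad leaves the IP header 4-byte
aligned behind a 14-byte Ethernet header; it is purely illustrative.

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header length */
#define NET_IP_ALIGN	2	/* typical value on most architectures */

int main(void)
{
	/* Assume the underlying buffer starts 4-byte aligned (offset 0). */
	unsigned int data_off = NET_IP_ALIGN;		/* after the reserve */
	unsigned int ip_off = data_off + ETH_HLEN;	/* start of IP header */

	printf("data offset %u, IP header offset %u (mod 4 = %u)\n",
	       data_off, ip_off, ip_off % 4);
	return 0;
}
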
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index dc8ee4438a4f..b4889e6c4a57 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -90,23 +90,60 @@ enum {
 	VMXNET3_CMD_GET_CONF_INTR
 };
 
-struct Vmxnet3_TxDesc {
-	u64		addr;
+/*
+ *	Little Endian layout of bitfields -
+ *	Byte 0 :	7.....len.....0
+ *	Byte 1 :	rsvd gen 13.len.8
+ *	Byte 2 : 	5.msscof.0 ext1  dtype
+ *	Byte 3 : 	13...msscof...6
+ *
+ *	Big Endian layout of bitfields -
+ *	Byte 0:		13...msscof...6
+ *	Byte 1 : 	5.msscof.0 ext1  dtype
+ *	Byte 2 :	rsvd gen 13.len.8
+ *	Byte 3 :	7.....len.....0
+ *
+ *	Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ *	the bit fields correctly, and cpu_to_le32 will convert the bit fields
+ *	written by the big endian driver to the format required by the device.
+ */
 
-	u32		len:14;
-	u32		gen:1;      /* generation bit */
-	u32		rsvd:1;
-	u32		dtype:1;    /* descriptor type */
-	u32		ext1:1;
-	u32		msscof:14;  /* MSS, checksum offset, flags */
-
-	u32		hlen:10;    /* header len */
-	u32		om:2;       /* offload mode */
-	u32		eop:1;      /* End Of Packet */
-	u32		cq:1;       /* completion request */
-	u32		ext2:1;
-	u32		ti:1;       /* VLAN Tag Insertion */
-	u32		tci:16;     /* Tag to Insert */
+struct Vmxnet3_TxDesc {
+	__le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 msscof:14;  /* MSS, checksum offset, flags */
+	u32 ext1:1;
+	u32 dtype:1;    /* descriptor type */
+	u32 rsvd:1;
+	u32 gen:1;      /* generation bit */
+	u32 len:14;
+#else
+	u32 len:14;
+	u32 gen:1;      /* generation bit */
+	u32 rsvd:1;
+	u32 dtype:1;    /* descriptor type */
+	u32 ext1:1;
+	u32 msscof:14;  /* MSS, checksum offset, flags */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32 tci:16;     /* Tag to Insert */
+	u32 ti:1;       /* VLAN Tag Insertion */
+	u32 ext2:1;
+	u32 cq:1;       /* completion request */
+	u32 eop:1;      /* End Of Packet */
+	u32 om:2;       /* offload mode */
+	u32 hlen:10;    /* header len */
+#else
+	u32 hlen:10;    /* header len */
+	u32 om:2;       /* offload mode */
+	u32 eop:1;      /* End Of Packet */
+	u32 cq:1;       /* completion request */
+	u32 ext2:1;
+	u32 ti:1;       /* VLAN Tag Insertion */
+	u32 tci:16;     /* Tag to Insert */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
 /* TxDesc.OM values */
@@ -118,6 +155,8 @@ struct Vmxnet3_TxDesc {
 #define VMXNET3_TXD_EOP_SHIFT	12
 #define VMXNET3_TXD_CQ_SHIFT	13
 #define VMXNET3_TXD_GEN_SHIFT	14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
 
 #define VMXNET3_TXD_CQ		(1 << VMXNET3_TXD_CQ_SHIFT)
 #define VMXNET3_TXD_EOP		(1 << VMXNET3_TXD_EOP_SHIFT)
@@ -130,29 +169,40 @@ struct Vmxnet3_TxDataDesc {
 	u8		data[VMXNET3_HDR_COPY_SIZE];
 };
 
+#define VMXNET3_TCD_GEN_SHIFT	31
+#define VMXNET3_TCD_GEN_SIZE	1
+#define VMXNET3_TCD_TXIDX_SHIFT	0
+#define VMXNET3_TCD_TXIDX_SIZE	12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT	3
 
 struct Vmxnet3_TxCompDesc {
 	u32		txdIdx:12;    /* Index of the EOP TxDesc */
 	u32		ext1:20;
 
-	u32		ext2;
-	u32		ext3;
+	__le32		ext2;
+	__le32		ext3;
 
 	u32		rsvd:24;
 	u32		type:7;       /* completion type */
 	u32		gen:1;        /* generation bit */
 };
 
-
 struct Vmxnet3_RxDesc {
-	u64		addr;
+	__le64		addr;
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gen:1;        /* Generation bit */
+	u32		rsvd:15;
+	u32		dtype:1;      /* Descriptor type */
+	u32		btype:1;      /* Buffer Type */
+	u32		len:14;
+#else
 	u32		len:14;
 	u32		btype:1;      /* Buffer Type */
 	u32		dtype:1;      /* Descriptor type */
 	u32		rsvd:15;
 	u32		gen:1;        /* Generation bit */
-
+#endif
 	u32		ext1;
 };
 
@@ -164,8 +214,17 @@ struct Vmxnet3_RxDesc {
 #define VMXNET3_RXD_BTYPE_SHIFT  14
 #define VMXNET3_RXD_GEN_SHIFT    31
 
-
 struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		ext2:1;
+	u32		cnc:1;        /* Checksum Not Calculated */
+	u32		rssType:4;    /* RSS hash type used */
+	u32		rqID:10;      /* rx queue/ring ID */
+	u32		sop:1;        /* Start of Packet */
+	u32		eop:1;        /* End of Packet */
+	u32		ext1:2;
+	u32		rxdIdx:12;    /* Index of the RxDesc */
+#else
 	u32		rxdIdx:12;    /* Index of the RxDesc */
 	u32		ext1:2;
 	u32		eop:1;        /* End of Packet */
@@ -174,14 +233,36 @@ struct Vmxnet3_RxCompDesc {
 	u32		rssType:4;    /* RSS hash type used */
 	u32		cnc:1;        /* Checksum Not Calculated */
 	u32		ext2:1;
+#endif  /* __BIG_ENDIAN_BITFIELD */
 
-	u32		rssHash;      /* RSS hash value */
+	__le32		rssHash;      /* RSS hash value */
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		tci:16;       /* Tag stripped */
+	u32		ts:1;         /* Tag is stripped */
+	u32		err:1;        /* Error */
+	u32		len:14;       /* data length */
+#else
 	u32		len:14;       /* data length */
 	u32		err:1;        /* Error */
 	u32		ts:1;         /* Tag is stripped */
 	u32		tci:16;       /* Tag stripped */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
 
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gen:1;        /* generation bit */
+	u32		type:7;       /* completion type */
+	u32		fcs:1;        /* Frame CRC correct */
+	u32		frg:1;        /* IP Fragment */
+	u32		v4:1;         /* IPv4 */
+	u32		v6:1;         /* IPv6 */
+	u32		ipc:1;        /* IP Checksum Correct */
+	u32		tcp:1;        /* TCP packet */
+	u32		udp:1;        /* UDP packet */
+	u32		tuc:1;        /* TCP/UDP Checksum Correct */
+	u32		csum:16;
+#else
 	u32		csum:16;
 	u32		tuc:1;        /* TCP/UDP Checksum Correct */
 	u32		udp:1;        /* UDP packet */
@@ -193,6 +274,7 @@ struct Vmxnet3_RxCompDesc {
 	u32		fcs:1;        /* Frame CRC correct */
 	u32		type:7;       /* completion type */
 	u32		gen:1;        /* generation bit */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
 /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
@@ -206,6 +288,8 @@ struct Vmxnet3_RxCompDesc {
 /* csum OK for TCP/UDP pkts over IP */
 #define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
 			     1 << VMXNET3_RCD_IPC_SHIFT)
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
 
 /* value of RxCompDesc.rssType */
 enum {
@@ -219,9 +303,9 @@ enum {
 
 /* a union for accessing all cmd/completion descriptors */
 union Vmxnet3_GenericDesc {
-	u64				qword[2];
-	u32				dword[4];
-	u16				word[8];
+	__le64				qword[2];
+	__le32				dword[4];
+	__le16				word[8];
 	struct Vmxnet3_TxDesc		txd;
 	struct Vmxnet3_RxDesc		rxd;
 	struct Vmxnet3_TxCompDesc	tcd;
@@ -287,18 +371,24 @@ enum {
 
 
 struct Vmxnet3_GOSInfo {
-	u32				gosBits:2;	/* 32-bit or 64-bit? */
-	u32				gosType:4;   /* which guest */
-	u32				gosVer:16;   /* gos version */
-	u32				gosMisc:10;  /* other info about gos */
+#ifdef __BIG_ENDIAN_BITFIELD
+	u32		gosMisc:10;    /* other info about gos */
+	u32		gosVer:16;     /* gos version */
+	u32		gosType:4;     /* which guest */
+	u32		gosBits:2;    /* 32-bit or 64-bit? */
+#else
+	u32		gosBits:2;     /* 32-bit or 64-bit? */
+	u32		gosType:4;     /* which guest */
+	u32		gosVer:16;     /* gos version */
+	u32		gosMisc:10;    /* other info about gos */
+#endif  /* __BIG_ENDIAN_BITFIELD */
 };
 
-
 struct Vmxnet3_DriverInfo {
-	u32				version;
+	__le32				version;
 	struct Vmxnet3_GOSInfo		gos;
-	u32				vmxnet3RevSpt;
-	u32				uptVerSpt;
+	__le32				vmxnet3RevSpt;
+	__le32				uptVerSpt;
 };
 
 
@@ -315,42 +405,42 @@ struct Vmxnet3_DriverInfo {
 
 struct Vmxnet3_MiscConf {
 	struct Vmxnet3_DriverInfo driverInfo;
-	u64		uptFeatures;
-	u64		ddPA;         /* driver data PA */
-	u64		queueDescPA;  /* queue descriptor table PA */
-	u32		ddLen;        /* driver data len */
-	u32		queueDescLen; /* queue desc. table len in bytes */
-	u32		mtu;
-	u16		maxNumRxSG;
+	__le64		uptFeatures;
+	__le64		ddPA;         /* driver data PA */
+	__le64		queueDescPA;  /* queue descriptor table PA */
+	__le32		ddLen;        /* driver data len */
+	__le32		queueDescLen; /* queue desc. table len in bytes */
+	__le32		mtu;
+	__le16		maxNumRxSG;
 	u8		numTxQueues;
 	u8		numRxQueues;
-	u32		reserved[4];
+	__le32		reserved[4];
 };
 
 
 struct Vmxnet3_TxQueueConf {
-	u64		txRingBasePA;
-	u64		dataRingBasePA;
-	u64		compRingBasePA;
-	u64		ddPA;         /* driver data */
-	u64		reserved;
-	u32		txRingSize;   /* # of tx desc */
-	u32		dataRingSize; /* # of data desc */
-	u32		compRingSize; /* # of comp desc */
-	u32		ddLen;        /* size of driver data */
+	__le64		txRingBasePA;
+	__le64		dataRingBasePA;
+	__le64		compRingBasePA;
+	__le64		ddPA;         /* driver data */
+	__le64		reserved;
+	__le32		txRingSize;   /* # of tx desc */
+	__le32		dataRingSize; /* # of data desc */
+	__le32		compRingSize; /* # of comp desc */
+	__le32		ddLen;        /* size of driver data */
 	u8		intrIdx;
 	u8		_pad[7];
 };
 
 
 struct Vmxnet3_RxQueueConf {
-	u64		rxRingBasePA[2];
-	u64		compRingBasePA;
-	u64		ddPA;            /* driver data */
-	u64		reserved;
-	u32		rxRingSize[2];   /* # of rx desc */
-	u32		compRingSize;    /* # of rx comp desc */
-	u32		ddLen;           /* size of driver data */
+	__le64		rxRingBasePA[2];
+	__le64		compRingBasePA;
+	__le64		ddPA;            /* driver data */
+	__le64		reserved;
+	__le32		rxRingSize[2];   /* # of rx desc */
+	__le32		compRingSize;    /* # of rx comp desc */
+	__le32		ddLen;           /* size of driver data */
 	u8		intrIdx;
 	u8		_pad[7];
 };
@@ -381,7 +471,7 @@ struct Vmxnet3_IntrConf {
 	u8		eventIntrIdx;
 	u8		modLevels[VMXNET3_MAX_INTRS];	/* moderation level for
 							 * each intr */
-	u32		reserved[3];
+	__le32		reserved[3];
 };
 
 /* one bit per VLAN ID, the size is in the units of u32	*/
@@ -391,21 +481,21 @@ struct Vmxnet3_IntrConf {
 struct Vmxnet3_QueueStatus {
 	bool		stopped;
 	u8		_pad[3];
-	u32		error;
+	__le32		error;
 };
 
 
 struct Vmxnet3_TxQueueCtrl {
-	u32		txNumDeferred;
-	u32		txThreshold;
-	u64		reserved;
+	__le32		txNumDeferred;
+	__le32		txThreshold;
+	__le64		reserved;
 };
 
 
 struct Vmxnet3_RxQueueCtrl {
 	bool		updateRxProd;
 	u8		_pad[7];
-	u64		reserved;
+	__le64		reserved;
 };
 
 enum {
@@ -417,11 +507,11 @@ enum {
 };
 
 struct Vmxnet3_RxFilterConf {
-	u32		rxMode;       /* VMXNET3_RXM_xxx */
-	u16		mfTableLen;   /* size of the multicast filter table */
-	u16		_pad1;
-	u64		mfTablePA;    /* PA of the multicast filters table */
-	u32		vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+	__le32		rxMode;       /* VMXNET3_RXM_xxx */
+	__le16		mfTableLen;   /* size of the multicast filter table */
+	__le16		_pad1;
+	__le64		mfTablePA;    /* PA of the multicast filters table */
+	__le32		vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
 };
 
 
@@ -444,7 +534,7 @@ struct Vmxnet3_PM_PktFilter {
 
 
 struct Vmxnet3_PMConf {
-	u16		wakeUpEvents;  /* VMXNET3_PM_WAKEUP_xxx */
+	__le16		wakeUpEvents;  /* VMXNET3_PM_WAKEUP_xxx */
 	u8		numFilters;
 	u8		pad[5];
 	struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
@@ -452,9 +542,9 @@ struct Vmxnet3_PMConf {
 
 
 struct Vmxnet3_VariableLenConfDesc {
-	u32		confVer;
-	u32		confLen;
-	u64		confPA;
+	__le32		confVer;
+	__le32		confLen;
+	__le64		confPA;
 };
 
 
@@ -491,12 +581,12 @@ struct Vmxnet3_DSDevRead {
 
 /* All structures in DriverShared are padded to multiples of 8 bytes */
 struct Vmxnet3_DriverShared {
-	u32				magic;
+	__le32				magic;
 	/* make devRead start at 64bit boundaries */
-	u32					pad;
-	struct Vmxnet3_DSDevRead		devRead;
-	u32					ecr;
-	u32					reserved[5];
+	__le32				pad;
+	struct Vmxnet3_DSDevRead	devRead;
+	__le32				ecr;
+	__le32				reserved[5];
 };
 
 
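
The mirrored #ifdef __BIG_ENDIAN_BITFIELD layouts above rely on the
compiler allocating bitfields from the least-significant bit on
little-endian hosts (and from the most-significant bit on big-endian
ones), so that the whole descriptor dword, converted with
le32_to_cpu()/cpu_to_le32(), means the same thing everywhere. The
standalone sketch below, assuming a little-endian GCC/Clang host, checks
that the bitfield view and a mask-and-shift view of the second Tx
descriptor dword agree; the struct name is made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Little-endian-host field order only; the real header mirrors the order
 * under __BIG_ENDIAN_BITFIELD instead of relying on this layout. */
struct txd_word2 {
	uint32_t len:14;
	uint32_t gen:1;
	uint32_t rsvd:1;
	uint32_t dtype:1;
	uint32_t ext1:1;
	uint32_t msscof:14;
};

int main(void)
{
	struct txd_word2 w = { .len = 1234, .gen = 1, .msscof = 77 };
	uint32_t dword;

	memcpy(&dword, &w, sizeof(dword));
	/* On a little-endian host both views agree: len in bits 0-13,
	 * gen in bit 14, msscof in bits 18-31. */
	printf("len=%u gen=%u msscof=%u\n",
	       (unsigned)(dword & 0x3fff), (unsigned)((dword >> 14) & 1),
	       (unsigned)((dword >> 18) & 0x3fff));
	return 0;
}
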
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 004353a46af0..1ceb9d0f8b97 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -24,12 +24,13 @@
  *
  */
 
+#include <net/ip6_checksum.h>
+
 #include "vmxnet3_int.h"
 
 char vmxnet3_driver_name[] = "vmxnet3";
 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
 
-
 /*
  * PCI Device ID Table
  * Last entry must be all 0s
@@ -151,11 +152,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
 	}
 }
 
-
 static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
-	u32 events = adapter->shared->ecr;
+	u32 events = le32_to_cpu(adapter->shared->ecr);
 	if (!events)
 		return;
 
@@ -173,7 +173,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 		if (adapter->tqd_start->status.stopped) {
 			printk(KERN_ERR "%s: tq error 0x%x\n",
 			       adapter->netdev->name,
-			       adapter->tqd_start->status.error);
+			       le32_to_cpu(adapter->tqd_start->status.error));
 		}
 		if (adapter->rqd_start->status.stopped) {
 			printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +185,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 	}
 }
 
+#ifdef __BIG_ENDIAN_BITFIELD
+/*
+ * The device expects the bitfields in shared structures to be written in
+ * little endian. When CPU is big endian, the following routines are used to
+ * correctly read and write into ABI.
+ * The general technique used here is : double word bitfields are defined in
+ * opposite order for big endian architecture. Then before reading them in
+ * driver the complete double word is translated using le32_to_cpu. Similarly
+ * After the driver writes into bitfields, cpu_to_le32 is used to translate the
+ * double words into required format.
+ * In order to avoid touching bits in shared structure more than once, temporary
+ * descriptors are used. These are passed as srcDesc to following functions.
+ */
+static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
+				struct Vmxnet3_RxDesc *dstDesc)
+{
+	u32 *src = (u32 *)srcDesc + 2;
+	u32 *dst = (u32 *)dstDesc + 2;
+	dstDesc->addr = le64_to_cpu(srcDesc->addr);
+	*dst = le32_to_cpu(*src);
+	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
+}
+
+static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
+			       struct Vmxnet3_TxDesc *dstDesc)
+{
+	int i;
+	u32 *src = (u32 *)(srcDesc + 1);
+	u32 *dst = (u32 *)(dstDesc + 1);
+
+	/* Working backwards so that the gen bit is set at the end. */
+	for (i = 2; i > 0; i--) {
+		src--;
+		dst--;
+		*dst = cpu_to_le32(*src);
+	}
+}
+
+
+static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
+				struct Vmxnet3_RxCompDesc *dstDesc)
+{
+	int i = 0;
+	u32 *src = (u32 *)srcDesc;
+	u32 *dst = (u32 *)dstDesc;
+	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
+		*dst = le32_to_cpu(*src);
+		src++;
+		dst++;
+	}
+}
+
+
+/* Used to read bitfield values from double words. */
+static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
+{
+	u32 temp = le32_to_cpu(*bitfield);
+	u32 mask = ((1 << size) - 1) << pos;
+	temp &= mask;
+	temp >>= pos;
+	return temp;
+}
+
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
+			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
+#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
+			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
+#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
+			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
+			VMXNET3_TCD_GEN_SIZE)
+#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
+			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
+#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
+			(dstrcd) = (tmp); \
+			vmxnet3_RxCompToCPU((rcd), (tmp)); \
+		} while (0)
+#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
+			(dstrxd) = (tmp); \
+			vmxnet3_RxDescToCPU((rxd), (tmp)); \
+		} while (0)
+
+#else
+
+#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
+#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
+#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
+#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
+#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
+#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
+
+#endif /* __BIG_ENDIAN_BITFIELD  */
+
 
 static void
 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
@@ -212,7 +312,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
 
 	/* no out of order completion */
 	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
-	BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
+	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
 
 	skb = tq->buf_info[eop_idx].skb;
 	BUG_ON(skb == NULL);
@@ -246,9 +346,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 	union Vmxnet3_GenericDesc *gdesc;
 
 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
-	while (gdesc->tcd.gen == tq->comp_ring.gen) {
-		completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
-					       adapter->pdev, adapter);
+	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+					       &gdesc->tcd), tq, adapter->pdev,
+					       adapter);
 
 		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +573,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 		}
 
 		BUG_ON(rbi->dma_addr == 0);
-		gd->rxd.addr = rbi->dma_addr;
-		gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
-				rbi->len;
+		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
+		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+					   | val | rbi->len);
 
 		num_allocated++;
 		vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -531,10 +632,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 	/* no need to map the buffer if headers are copied */
 	if (ctx->copy_size) {
-		ctx->sop_txd->txd.addr = tq->data_ring.basePA +
+		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 					tq->tx_ring.next2fill *
-					sizeof(struct Vmxnet3_TxDataDesc);
-		ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
+					sizeof(struct Vmxnet3_TxDataDesc));
+		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 		ctx->sop_txd->dword[3] = 0;
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +643,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
+			tq->tx_ring.next2fill,
+			le64_to_cpu(ctx->sop_txd->txd.addr),
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 
@@ -570,14 +672,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = tbi->dma_addr;
-		gdesc->dword[2] = dw2 | buf_size;
+		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+		gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, gdesc->txd.addr,
-			gdesc->dword[2], gdesc->dword[3]);
+			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 
@@ -599,14 +701,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = tbi->dma_addr;
-		gdesc->dword[2] = dw2 | frag->size;
+		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%llu %u %u\n",
-			tq->tx_ring.next2fill, gdesc->txd.addr,
-			gdesc->dword[2], gdesc->dword[3]);
+			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 	}
@@ -751,6 +853,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	unsigned long flags;
 	struct vmxnet3_tx_ctx ctx;
 	union Vmxnet3_GenericDesc *gdesc;
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Use temporary descriptor to avoid touching bits multiple times */
+	union Vmxnet3_GenericDesc tempTxDesc;
+#endif
 
 	/* conservatively estimate # of descriptors to use */
 	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +933,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
 
 	/* setup the EOP desc */
-	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
+	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
 
 	/* setup the SOP desc */
+#ifdef __BIG_ENDIAN_BITFIELD
+	gdesc = &tempTxDesc;
+	gdesc->dword[2] = ctx.sop_txd->dword[2];
+	gdesc->dword[3] = ctx.sop_txd->dword[3];
+#else
 	gdesc = ctx.sop_txd;
+#endif
 	if (ctx.mss) {
 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
 		gdesc->txd.om = VMXNET3_OM_TSO;
 		gdesc->txd.msscof = ctx.mss;
-		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
-					     ctx.mss - 1) / ctx.mss;
+		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
+			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -847,7 +959,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
 		}
-		tq->shared->txNumDeferred++;
+		le32_add_cpu(&tq->shared->txNumDeferred, 1);
 	}
 
 	if (vlan_tx_tag_present(skb)) {
@@ -855,19 +967,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		gdesc->txd.tci = vlan_tx_tag_get(skb);
 	}
 
-	wmb();
-
-	/* finally flips the GEN bit of the SOP desc */
-	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+	/* finally flips the GEN bit of the SOP desc. */
+	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+						  VMXNET3_TXD_GEN);
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Finished updating in bitfields of Tx Desc, so write them in original
+	 * place.
+	 */
+	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
+			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
+	gdesc = ctx.sop_txd;
+#endif
 	dev_dbg(&adapter->netdev->dev,
 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
-		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
-		gdesc->dword[3]);
+		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
+		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
 
 	spin_unlock_irqrestore(&tq->tx_lock, flags);
 
-	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
+	if (le32_to_cpu(tq->shared->txNumDeferred) >=
+					le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
 		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
 				       tq->tx_ring.next2fill);
@@ -889,9 +1009,8 @@ static netdev_tx_t
 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
 
-	return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
+	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
 }
 
 
@@ -902,7 +1021,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 {
 	if (!gdesc->rcd.cnc && adapter->rxcsum) {
 		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
+		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
 							VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1076,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	u32 num_rxd = 0;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
-
-	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+#ifdef __BIG_ENDIAN_BITFIELD
+	struct Vmxnet3_RxDesc rxCmdDesc;
+	struct Vmxnet3_RxCompDesc rxComp;
+#endif
+	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
+			  &rxComp);
 	while (rcd->gen == rq->comp_ring.gen) {
 		struct vmxnet3_rx_buf_info *rbi;
 		struct sk_buff *skb;
@@ -976,11 +1099,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
-
-		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
+		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
+				  &rxCmdDesc);
 		rbi = rq->buf_info[ring_idx] + idx;
 
-		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
+		BUG_ON(rxd->addr != rbi->dma_addr ||
+		       rxd->len != rbi->len);
 
 		if (unlikely(rcd->eop && rcd->err)) {
 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1202,8 @@ rcd_done:
 		}
 
 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
-		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+		vmxnet3_getRxComp(rcd,
+		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
 	return num_rxd;
@@ -1094,7 +1219,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
-			rxd = &rq->rx_ring[ring_idx].base[i].rxd;
+#ifdef __BIG_ENDIAN_BITFIELD
+			struct Vmxnet3_RxDesc rxDesc;
+#endif
+			vmxnet3_getRxDesc(rxd,
+				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
 
 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 					rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1475,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
 		err = request_irq(adapter->intr.msix_entries[0].vector,
 				  vmxnet3_intr, 0, adapter->netdev->name,
 				  adapter->netdev);
-	} else
-#endif
-	if (adapter->intr.type == VMXNET3_IT_MSI) {
+	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
 				  adapter->netdev->name, adapter->netdev);
-	} else {
+	} else
+#endif
+	{
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
 				  IRQF_SHARED, adapter->netdev->name,
 				  adapter->netdev);
@@ -1412,6 +1541,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
 }
 
 
+inline void set_flag_le16(__le16 *data, u16 flag)
+{
+	*data = cpu_to_le16(le16_to_cpu(*data) | flag);
+}
+
+inline void set_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) | flag);
+}
+
+inline void reset_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
+}
+
+
 static void
 vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
@@ -1427,7 +1572,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 			adapter->vlan_grp = grp;
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+			set_flag_le64(&devRead->misc.uptFeatures,
+				      UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 			/*
@@ -1450,7 +1596,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 		adapter->vlan_grp = NULL;
 
-		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
+		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
 			int i;
 
 			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1609,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+			reset_flag_le64(&devRead->misc.uptFeatures,
+					UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 		}
@@ -1565,9 +1712,10 @@ vmxnet3_set_mc(struct net_device *netdev)
 			new_table = vmxnet3_copy_mc(netdev);
 			if (new_table) {
 				new_mode |= VMXNET3_RXM_MCAST;
-				rxConf->mfTableLen = netdev->mc_count *
-						     ETH_ALEN;
-				rxConf->mfTablePA = virt_to_phys(new_table);
+				rxConf->mfTableLen = cpu_to_le16(
+						netdev->mc_count * ETH_ALEN);
+				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
+						    new_table));
 			} else {
 				printk(KERN_INFO "%s: failed to copy mcast list"
 				       ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1730,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	}
 
 	if (new_mode != rxConf->rxMode) {
-		rxConf->rxMode = new_mode;
+		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_RX_MODE);
 	}
@@ -1610,63 +1758,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	memset(shared, 0, sizeof(*shared));
 
 	/* driver settings */
-	shared->magic = VMXNET3_REV1_MAGIC;
-	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
+	devRead->misc.driverInfo.version = cpu_to_le32(
+						VMXNET3_DRIVER_VERSION_NUM);
 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
-	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
-	devRead->misc.driverInfo.uptVerSpt = 1;
+	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
+				*((u32 *)&devRead->misc.driverInfo.gos));
+	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
+	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 
-	devRead->misc.ddPA = virt_to_phys(adapter);
-	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
+	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
 	/* set up feature flags */
 	if (adapter->rxcsum)
-		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
 
 	if (adapter->lro) {
-		devRead->misc.uptFeatures |= UPT1_F_LRO;
-		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
-	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
-			&& adapter->vlan_grp) {
-		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
+	    adapter->vlan_grp) {
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
 	}
 
-	devRead->misc.mtu = adapter->netdev->mtu;
-	devRead->misc.queueDescPA = adapter->queue_desc_pa;
-	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
-				     sizeof(struct Vmxnet3_RxQueueDesc);
+	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
+	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
+	devRead->misc.queueDescLen = cpu_to_le32(
+				     sizeof(struct Vmxnet3_TxQueueDesc) +
+				     sizeof(struct Vmxnet3_RxQueueDesc));
 
 	/* tx queue settings */
 	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
 
 	devRead->misc.numTxQueues = 1;
 	tqc = &adapter->tqd_start->conf;
-	tqc->txRingBasePA   = adapter->tx_queue.tx_ring.basePA;
-	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
-	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
-	tqc->ddPA           = virt_to_phys(adapter->tx_queue.buf_info);
-	tqc->txRingSize     = adapter->tx_queue.tx_ring.size;
-	tqc->dataRingSize   = adapter->tx_queue.data_ring.size;
-	tqc->compRingSize   = adapter->tx_queue.comp_ring.size;
-	tqc->ddLen          = sizeof(struct vmxnet3_tx_buf_info) *
-			      tqc->txRingSize;
+	tqc->txRingBasePA   = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
+	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
+	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
+	tqc->ddPA           = cpu_to_le64(virt_to_phys(
+						adapter->tx_queue.buf_info));
+	tqc->txRingSize     = cpu_to_le32(adapter->tx_queue.tx_ring.size);
+	tqc->dataRingSize   = cpu_to_le32(adapter->tx_queue.data_ring.size);
+	tqc->compRingSize   = cpu_to_le32(adapter->tx_queue.comp_ring.size);
+	tqc->ddLen          = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
+			      tqc->txRingSize);
 	tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;
 
 	/* rx queue settings */
 	devRead->misc.numRxQueues = 1;
 	rqc = &adapter->rqd_start->conf;
-	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
-	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
-	rqc->compRingBasePA  = adapter->rx_queue.comp_ring.basePA;
-	rqc->ddPA            = virt_to_phys(adapter->rx_queue.buf_info);
-	rqc->rxRingSize[0]   = adapter->rx_queue.rx_ring[0].size;
-	rqc->rxRingSize[1]   = adapter->rx_queue.rx_ring[1].size;
-	rqc->compRingSize    = adapter->rx_queue.comp_ring.size;
-	rqc->ddLen           = sizeof(struct vmxnet3_rx_buf_info) *
-			       (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
+	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
+	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
+	rqc->compRingBasePA  = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
+	rqc->ddPA            = cpu_to_le64(virt_to_phys(
+						adapter->rx_queue.buf_info));
+	rqc->rxRingSize[0]   = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
+	rqc->rxRingSize[1]   = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
+	rqc->compRingSize    = cpu_to_le32(adapter->rx_queue.comp_ring.size);
+	rqc->ddLen           = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
+			       (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
 	rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;
 
 	/* intr settings */
@@ -1715,11 +1869,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 
 	vmxnet3_setup_driver_shared(adapter);
 
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
-			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
-			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));
-
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
+			       adapter->shared_pa));
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
+			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2578,7 @@ vmxnet3_suspend(struct device *device)
 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
@@ -2467,19 +2620,21 @@ vmxnet3_suspend(struct device *device)
 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
 		in_dev_put(in_dev);
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
 skip_arp:
 	if (adapter->wol & WAKE_MAGIC)
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
 
 	pmConf->numFilters = i;
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								  *pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								 pmConf));
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2665,11 @@ vmxnet3_resume(struct device *device)
 	pmConf = adapter->pm_conf;
 	memset(pmConf, 0, sizeof(*pmConf));
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								  *pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								 pmConf));
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);
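
get_bitfield32() above extracts a named field from a descriptor dword by
bit position and size after le32_to_cpu(), so big-endian hosts never have
to trust the C bitfield layout of device-visible structures. Below is a
standalone sketch of the same mask-and-shift idea, using the Tx
descriptor's len (bits 0-13) and gen (bit 14, VMXNET3_TXD_GEN_SHIFT)
fields as the example; the helper and variable names are illustrative
only.

#include <stdio.h>
#include <stdint.h>

/* 'word' is the dword already converted to CPU order (the driver gets it
 * via le32_to_cpu() of the __le32 in the descriptor). Assumes size < 32. */
static uint32_t get_field32(uint32_t word, unsigned int pos, unsigned int size)
{
	uint32_t mask = ((1u << size) - 1) << pos;

	return (word & mask) >> pos;
}

int main(void)
{
	/* Hypothetical Tx descriptor dword: len = 60, gen = 1. */
	uint32_t dword = (1u << 14) | 60;

	printf("len = %u, gen = %u\n",
	       (unsigned)get_field32(dword, 0, 14),
	       (unsigned)get_field32(dword, 14, 1));
	return 0;
}
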
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c2c15e4cafc7..3935c4493fb7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,11 +50,13 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 		adapter->rxcsum = val;
 		if (netif_running(netdev)) {
 			if (val)
-				adapter->shared->devRead.misc.uptFeatures |=
-								UPT1_F_RXCSUM;
+				set_flag_le64(
+				&adapter->shared->devRead.misc.uptFeatures,
+				UPT1_F_RXCSUM);
 			else
-				adapter->shared->devRead.misc.uptFeatures &=
-								~UPT1_F_RXCSUM;
+				reset_flag_le64(
+				&adapter->shared->devRead.misc.uptFeatures,
+				UPT1_F_RXCSUM);
 
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3c0d70d58111..34f392f46fb1 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -27,16 +27,11 @@
 #ifndef _VMXNET3_INT_H
 #define _VMXNET3_INT_H
 
-#include <linux/types.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
-#include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
-#include <linux/ethtool.h>
 #include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/ioport.h>
@@ -335,14 +330,14 @@ struct vmxnet3_adapter {
 };
 
 #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
-	writel((val), (adapter)->hw_addr0 + (reg))
+	writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
 #define VMXNET3_READ_BAR0_REG(adapter, reg)        \
-	readl((adapter)->hw_addr0 + (reg))
+	le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
 
 #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
-	writel((val), (adapter)->hw_addr1 + (reg))
+	writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
 #define VMXNET3_READ_BAR1_REG(adapter, reg)        \
-	readl((adapter)->hw_addr1 + (reg))
+	le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
 
 #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
 #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -358,6 +353,10 @@ struct vmxnet3_adapter {
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
 
+void set_flag_le16(__le16 *data, u16 flag);
+void set_flag_le64(__le64 *data, u64 flag);
+void reset_flag_le64(__le64 *data, u64 flag);
+
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
 
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 9e94c4b0fb18..32a75fa935ed 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -356,10 +356,8 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
 
 	switch (host_type) {
 	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
-		if (func_id == 0) {
-			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
-					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
-		}
+		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
 		break;
 	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
 		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -382,6 +380,22 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
 	return access_rights;
 }
 /*
+ * __vxge_hw_device_is_privilaged
+ * This routine checks whether the device function is privileged or not
+ */
+
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
+{
+	if (__vxge_hw_device_access_rights_get(host_type,
+		func_id) &
+		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
+		return VXGE_HW_OK;
+	else
+		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+}
+
+/*
  * __vxge_hw_device_host_info_get
  * This routine returns the host type assignments
  */
@@ -446,220 +460,6 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
 	return VXGE_HW_OK;
 }
 
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
-{
-	if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
-	hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
-	hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
-	(hldev->func_id == 0))
-		return VXGE_HW_OK;
-	else
-		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
-}
-
-/*
- * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calandars.
- * Rebalance the RX_WRR and KDFC_WRR calandars.
- */
-static enum
-vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
-{
-	u64 val64;
-	u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
-	u32 i, j, how_often = 1;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	status = __vxge_hw_device_is_privilaged(hldev);
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	/* Reset the priorities assigned to the WRR arbitration
-	phases for the receive traffic */
-	for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
-		writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
-
-	/* Reset the transmit FIFO servicing calendar for FIFOs */
-	for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
-		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
-		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
-	}
-
-	/* Assign WRR priority  0 for all FIFOs */
-	for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
-				((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl)  + i));
-
-		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
-			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
-	}
-
-	/* Reset to service non-offload doorbells */
-	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
-	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
-
-	/* Set priority 0 to all receive queues */
-	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
-	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
-	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);
-
-	/* Initialize all the slots as unused */
-	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
-		wrr_states[i] = -1;
-
-	/* Prepare the Fifo service states */
-	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
-		if (!hldev->config.vp_config[i].min_bandwidth)
-			continue;
-
-		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
-				hldev->config.vp_config[i].min_bandwidth;
-		if (how_often) {
-
-			for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
-				if (wrr_states[j] == -1) {
-					wrr_states[j] = i;
-					/* Make sure each fifo is serviced
-					 * atleast once */
-					if (i == j)
-						j += VXGE_HW_MAX_VIRTUAL_PATHS;
-					else
-						j += how_often;
-				} else
-					j++;
-			}
-		}
-	}
-
-	/* Fill the unused slots with 0 */
-	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
-		if (wrr_states[j] == -1)
-			wrr_states[j] = 0;
-	}
-
-	/* Assign WRR priority number for FIFOs */
-	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
-				((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
-
-		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
-			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
-	}
-
-	/* Modify the servicing algorithm applied to the 3 types of doorbells.
-	i.e, none-offload, message and offload */
-	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
-				VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
-				&hldev->mrpcim_reg->kdfc_entry_type_sel_0);
-
-	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
-				&hldev->mrpcim_reg->kdfc_entry_type_sel_1);
-
-	for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
-
-		val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
-		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);
-
-		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
-		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
-	}
-
-	/* Set up the priorities assigned to receive queues */
-	writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
-			VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
-			&hldev->mrpcim_reg->rx_queue_priority_0);
-
-	writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
-			VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
-			&hldev->mrpcim_reg->rx_queue_priority_1);
-
-	writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
-				&hldev->mrpcim_reg->rx_queue_priority_2);
-
-	/* Initialize all the slots as unused */
-	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
-		wrr_states[i] = -1;
-
-	/* Prepare the Ring service states */
-	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
-		if (!hldev->config.vp_config[i].min_bandwidth)
-			continue;
-
-		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
-				hldev->config.vp_config[i].min_bandwidth;
-
-		if (how_often) {
-			for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
-				if (wrr_states[j] == -1) {
-					wrr_states[j] = i;
-					/* Make sure each ring is
-					 * serviced atleast once */
-					if (i == j)
-						j += VXGE_HW_MAX_VIRTUAL_PATHS;
-					else
-						j += how_often;
-				} else
-					j++;
-			}
-		}
-	}
-
-	/* Fill the unused slots with 0 */
-	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
-		if (wrr_states[j] == -1)
-			wrr_states[j] = 0;
-	}
-
-	for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
-		val64 =  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
-				wrr_states[j++]);
-		val64 |=  VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
-				wrr_states[j++]);
-
-		writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
-	}
-exit:
-	return status;
-}
-
 /*
  * __vxge_hw_device_initialize
  * Initialize Titan-V hardware.
@@ -668,14 +468,14 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
 
-	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) {
+	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
+				hldev->func_id)) {
 		/* Validate the pci-e link width and speed */
 		status = __vxge_hw_verify_pci_e_info(hldev);
 		if (status != VXGE_HW_OK)
 			goto exit;
 	}
 
-	vxge_hw_wrr_rebalance(hldev);
 exit:
 	return status;
 }
@@ -953,7 +753,8 @@ vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
 	u64 val64;
 	enum vxge_hw_status status = VXGE_HW_OK;
 
-	status = __vxge_hw_device_is_privilaged(hldev);
+	status = __vxge_hw_device_is_privilaged(hldev->host_type,
+			hldev->func_id);
 	if (status != VXGE_HW_OK)
 		goto exit;
 
@@ -990,7 +791,8 @@ vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
 
 	val64 = (u64 *)aggr_stats;
 
-	status = __vxge_hw_device_is_privilaged(hldev);
+	status = __vxge_hw_device_is_privilaged(hldev->host_type,
+			hldev->func_id);
 	if (status != VXGE_HW_OK)
 		goto exit;
 
@@ -1023,7 +825,8 @@ vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
 	u32 offset = 0x0;
 	val64 = (u64 *) port_stats;
 
-	status = __vxge_hw_device_is_privilaged(hldev);
+	status = __vxge_hw_device_is_privilaged(hldev->host_type,
+			hldev->func_id);
 	if (status != VXGE_HW_OK)
 		goto exit;
 
@@ -1221,7 +1024,8 @@ enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
 		goto exit;
 	}
 
-	status = __vxge_hw_device_is_privilaged(hldev);
+	status = __vxge_hw_device_is_privilaged(hldev->host_type,
+			hldev->func_id);
 	if (status != VXGE_HW_OK)
 		goto exit;
 
@@ -2353,6 +2157,28 @@ exit:
 }
 
 /*
+ * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
+{
+	struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	int i = 0, j = 0;
+
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+		if (!((vpath_mask) & vxge_mBIT(i)))
+			continue;
+		vpmgmt_reg = hldev->vpmgmt_reg[i];
+		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
+			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
+			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
+				return VXGE_HW_FAIL;
+		}
+	}
+	return status;
+}
+/*
  * vxge_hw_mgmt_reg_Write - Write Titan register.
  */
 enum vxge_hw_status
@@ -4056,6 +3882,30 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 	return status;
 }
 
+void
+vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+	struct __vxge_hw_virtualpath *vpath;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	struct vxge_hw_vp_config *config;
+	u64 val64;
+
+	vpath = &hldev->virtual_paths[vp_id];
+	vp_reg = vpath->vp_reg;
+	config = vpath->vp_config;
+
+	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+
+		if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+			config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+			writeq(val64,
+			&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+		}
+	}
+	return;
+}
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
@@ -4098,8 +3948,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
 	if (status != VXGE_HW_OK)
 		goto exit;
 
-	writeq(0, &vp_reg->gendma_int);
-
 	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
 
 	/* Get MRRS value from device control */
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 3e94f0ce0900..e7877df092f3 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -2201,6 +2201,8 @@ __vxge_hw_vpath_func_id_get(
 enum vxge_hw_status
 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
 
+enum vxge_hw_status
+vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
 /**
  * vxge_debug
  * @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 068d7a9d3e36..f1c4b2a1e867 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -2435,7 +2435,6 @@ static int vxge_add_isr(struct vxgedev *vdev)
 	int ret = 0;
 #ifdef CONFIG_PCI_MSI
 	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
-	u64 function_mode = vdev->config.device_hw_info.function_mode;
 	int pci_fun = PCI_FUNC(vdev->pdev->devfn);
 
 	if (vdev->config.intr_type == MSI_X)
@@ -2444,20 +2443,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
 	if (ret) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
-		if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
-			test_and_set_bit(__VXGE_STATE_CARD_UP,
-				&driver_config->inta_dev_open))
-			return VXGE_HW_FAIL;
-		else {
-			vxge_debug_init(VXGE_ERR,
-				"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
-			vdev->config.intr_type = INTA;
-			vxge_hw_device_set_intr_type(vdev->devh,
-				VXGE_HW_INTR_MODE_IRQLINE);
-			vxge_close_vpaths(vdev, 1);
-			vdev->no_of_vpath = 1;
-			vdev->stats.vpaths_open = 1;
-		}
+		vxge_debug_init(VXGE_ERR,
+			"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
+		vdev->config.intr_type = INTA;
 	}
 
 	if (vdev->config.intr_type == MSI_X) {
@@ -2505,24 +2493,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
 					"%s: MSIX - %d  Registration failed",
 					vdev->ndev->name, intr_cnt);
 				vxge_rem_msix_isr(vdev);
-				if ((function_mode ==
-					VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
-					test_and_set_bit(__VXGE_STATE_CARD_UP,
-						&driver_config->inta_dev_open))
-					return VXGE_HW_FAIL;
-				else {
-					vxge_hw_device_set_intr_type(
-						vdev->devh,
-						VXGE_HW_INTR_MODE_IRQLINE);
-						vdev->config.intr_type = INTA;
-					vxge_debug_init(VXGE_ERR,
-						"%s: Defaulting to INTA"
-						, vdev->ndev->name);
-					vxge_close_vpaths(vdev, 1);
-					vdev->no_of_vpath = 1;
-					vdev->stats.vpaths_open = 1;
+				vdev->config.intr_type = INTA;
+				vxge_debug_init(VXGE_ERR,
+					"%s: Defaulting to INTA"
+					, vdev->ndev->name);
 					goto INTA_MODE;
-				}
 			}
 
 			if (irq_req) {
@@ -2535,9 +2510,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			}
 
 			/* Point to next vpath handler */
-			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
-				&& (vp_idx < (vdev->no_of_vpath - 1)))
-					vp_idx++;
+			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
+			    (vp_idx < (vdev->no_of_vpath - 1)))
+				vp_idx++;
 		}
 
 		intr_cnt = vdev->max_vpath_supported * 2;
@@ -2555,23 +2530,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
 				"%s: MSIX - %d Registration failed",
 				vdev->ndev->name, intr_cnt);
 			vxge_rem_msix_isr(vdev);
-			if ((function_mode ==
-				VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
-				test_and_set_bit(__VXGE_STATE_CARD_UP,
-						&driver_config->inta_dev_open))
-				return VXGE_HW_FAIL;
-			else {
-				vxge_hw_device_set_intr_type(vdev->devh,
-						VXGE_HW_INTR_MODE_IRQLINE);
-				vdev->config.intr_type = INTA;
-				vxge_debug_init(VXGE_ERR,
-					"%s: Defaulting to INTA",
-					vdev->ndev->name);
-				vxge_close_vpaths(vdev, 1);
-				vdev->no_of_vpath = 1;
-				vdev->stats.vpaths_open = 1;
+			vdev->config.intr_type = INTA;
+			vxge_debug_init(VXGE_ERR,
+				"%s: Defaulting to INTA",
+				vdev->ndev->name);
 				goto INTA_MODE;
-			}
 		}
 
 		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
@@ -2584,6 +2547,10 @@ INTA_MODE:
 	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
 
 	if (vdev->config.intr_type == INTA) {
+		vxge_hw_device_set_intr_type(vdev->devh,
+			VXGE_HW_INTR_MODE_IRQLINE);
+		vxge_hw_vpath_tti_ci_set(vdev->devh,
+			vdev->vpaths[0].device_id);
 		ret = request_irq((int) vdev->pdev->irq,
 			vxge_isr_napi,
 			IRQF_SHARED, vdev->desc[0], vdev);
@@ -2688,13 +2655,6 @@ vxge_open(struct net_device *dev)
 	 * initialized */
 	netif_carrier_off(dev);
 
-	/* Check for another device already opn with INTA */
-	if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
-		test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
-		ret = -EPERM;
-		goto out0;
-	}
-
 	/* Open VPATHs */
 	status = vxge_open_vpaths(vdev);
 	if (status != VXGE_HW_OK) {
@@ -2983,7 +2943,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);
 
-	clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
 	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
 
 	return 0;
@@ -3653,11 +3612,12 @@ static int __devinit vxge_config_vpaths(
 		device_config->vp_config[i].fifo.enable =
 						VXGE_HW_FIFO_ENABLE;
 		device_config->vp_config[i].fifo.max_frags =
-				MAX_SKB_FRAGS;
+				MAX_SKB_FRAGS + 1;
 		device_config->vp_config[i].fifo.memblock_size =
 			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
 
-		txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
+		txdl_size = device_config->vp_config[i].fifo.max_frags *
+				sizeof(struct vxge_hw_fifo_txd);
 		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
 
 		device_config->vp_config[i].fifo.fifo_blocks =
@@ -4088,9 +4048,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		driver_config->config_dev_cnt = 0;
 		driver_config->total_dev_cnt = 0;
 		driver_config->g_no_cpus = 0;
-		driver_config->vpath_per_dev = max_config_vpath;
 	}
 
+	driver_config->vpath_per_dev = max_config_vpath;
+
 	driver_config->total_dev_cnt++;
 	if (++driver_config->config_dev_cnt > max_config_dev) {
 		ret = 0;
@@ -4243,6 +4204,15 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 			goto _exit3;
 	}
 
+	/* if FCS stripping is not disabled in the MAC, fail driver load */
+	if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"%s: FCS stripping is not disabled in MAC"
+			" failing driver load", VXGE_DRIVER_NAME);
+		ret = -EINVAL;
+		goto _exit4;
+	}
+
 	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
 
 	/* set private device info */
@@ -4387,6 +4357,27 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 	}
 
 	kfree(device_config);
+
+	/*
+	 * INTA is shared in multi-function mode. This is unlike the INTA
+	 * implementation in MR mode, where each VH has its own INTA message.
+	 * - INTA is masked (disabled) as long as at least one function sets
+	 * its TITAN_MASK_ALL_INT.ALARM bit.
+	 * - INTA is unmasked (enabled) when all enabled functions have cleared
+	 * their own TITAN_MASK_ALL_INT.ALARM bit.
+	 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
+	 * Though this driver leaves the top level interrupts unmasked while
+	 * leaving the required module interrupt bits masked on exit, there
+	 * could be a rogue driver around that does not follow this procedure,
+	 * resulting in a failure to generate interrupts. The following code is
+	 * present to prevent such a failure.
+	 */
+
+	if (ll_config.device_hw_info.function_mode ==
+		VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
+		if (vdev->config.intr_type == INTA)
+			vxge_hw_device_unmask_all(hldev);
+
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
 		vdev->ndev->name, __func__, __LINE__);
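
Among the vxge-main.c changes above, the fifo sizing now derives txdl_size from max_frags = MAX_SKB_FRAGS + 1, presumably to reserve one descriptor for the skb's linear data in addition to one per page fragment. A small worked example of that arithmetic; the constant values below are assumptions for illustration only, not the driver's real ones:

/* Standalone illustration of the TxDL sizing; all numbers are assumed. */
#include <stdio.h>

int main(void)
{
	const unsigned max_skb_frags = 18;	/* assumed MAX_SKB_FRAGS */
	const unsigned txd_size = 32;		/* assumed sizeof(struct vxge_hw_fifo_txd) */
	const unsigned memblock_size = 32768;	/* assumed VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE */

	unsigned max_frags = max_skb_frags + 1;		/* +1 for the linear part */
	unsigned txdl_size = max_frags * txd_size;	/* bytes per TxD list */
	unsigned txdl_per_memblock = memblock_size / txdl_size;

	printf("txdl_size = %u bytes, txdl_per_memblock = %u\n",
	       txdl_size, txdl_per_memblock);
	return 0;
}
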
 
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 9c36b3a9a63d..7c83ba4be9d7 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -112,7 +112,6 @@ enum vxge_mac_addr_state {
 struct vxge_drv_config {
 	int config_dev_cnt;
 	int total_dev_cnt;
-	unsigned long inta_dev_open;
 	int g_no_cpus;
 	unsigned int vpath_per_dev;
 };
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 9a3b823e08d4..9a0cf8eaa328 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -4326,10 +4326,6 @@ struct vxge_hw_vpath_reg {
 /*0x011e0*/	u64	umq_bwr_init_byte;
 #define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
 /*0x011e8*/	u64	gendma_int;
-#define	VXGE_HW_GENDMA_INT_IMMED_ENABLE	vxge_mBIT(6)
-#define	VXGE_HW_GENDMA_INT_EVENT_ENABLE	vxge_mBIT(7)
-#define VXGE_HW_GENDMA_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_GENDMA_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
 /*0x011f0*/	u64	umqdmq_ir_init_notify;
 #define	VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE	vxge_mBIT(3)
 /*0x011f8*/	u64	dmq_init_notify;
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index fe3ae518c69c..2c012f4ce465 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -295,6 +295,8 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
 	u64 val64;
 	u32 val32;
 
+	vxge_hw_device_mask_all(hldev);
+
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 
 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
@@ -1232,7 +1234,7 @@ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
 	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
 
 	__vxge_hw_non_offload_db_post(fifo,
-		(u64)(size_t)txdl_priv->dma_addr,
+		(u64)txdl_priv->dma_addr,
 		txdl_priv->frags - 1,
 		fifo->no_snoop_bits);
 
@@ -1961,14 +1963,14 @@ enum vxge_hw_status __vxge_hw_vpath_alarm_process(
 			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
 
 			if (((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-			    (!(val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+			     (!(val64 &
 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
 			    ((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-				&& (!(val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+			     (!(val64 &
 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-			))) {
+				     ))) {
 				sw_stats->error_stats.network_sustained_fault++;
 
 				writeq(
@@ -1981,14 +1983,14 @@ enum vxge_hw_status __vxge_hw_vpath_alarm_process(
 			}
 
 			if (((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-			    (!(val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+			     (!(val64 &
 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
 			    ((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-				&& (!(val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+			     (!(val64 &
 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-			))) {
+				     ))) {
 
 				sw_stats->error_stats.network_sustained_ok++;
 
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 461742b4442b..861c853e3e84 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2389,6 +2389,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+void
+vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
 
 /* ========================== PRIVATE API ================================= */
 
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 8fbce7552035..77c2a754b7b8 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
 
 #define VXGE_VERSION_MAJOR	"2"
 #define VXGE_VERSION_MINOR	"0"
-#define VXGE_VERSION_FIX	"5"
-#define VXGE_VERSION_BUILD	"18053"
+#define VXGE_VERSION_FIX	"6"
+#define VXGE_VERSION_BUILD	"18937"
 #define VXGE_VERSION_FOR	"k"
 #endif
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 8e25ca7080c7..b36bf96eb502 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -297,8 +297,8 @@ static ssize_t cosa_write(struct file *file,
 static unsigned int cosa_poll(struct file *file, poll_table *poll);
 static int cosa_open(struct inode *inode, struct file *file);
 static int cosa_release(struct inode *inode, struct file *file);
-static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg);
+static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
 #ifdef COSA_FASYNC_WORKING
 static int cosa_fasync(struct inode *inode, struct file *file, int on);
 #endif
@@ -309,7 +309,7 @@ static const struct file_operations cosa_fops = {
 	.read		= cosa_read,
 	.write		= cosa_write,
 	.poll		= cosa_poll,
-	.ioctl		= cosa_chardev_ioctl,
+	.unlocked_ioctl	= cosa_chardev_ioctl,
 	.open		= cosa_open,
 	.release	= cosa_release,
 #ifdef COSA_FASYNC_WORKING
@@ -1205,12 +1205,18 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	return hdlc_ioctl(dev, ifr, cmd);
 }
 
-static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg)
+static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
 {
 	struct channel_data *channel = file->private_data;
-	struct cosa_data *cosa = channel->cosa;
-	return cosa_ioctl_common(cosa, channel, cmd, arg);
+	struct cosa_data *cosa;
+	long ret;
+
+	lock_kernel();
+	cosa = channel->cosa;
+	ret = cosa_ioctl_common(cosa, channel, cmd, arg);
+	unlock_kernel();
+	return ret;
 }
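
The cosa change above is part of the .ioctl to .unlocked_ioctl conversion: the VFS no longer takes the Big Kernel Lock around the handler, so the driver takes it explicitly with lock_kernel()/unlock_kernel(). As a hedged illustration only (not part of this commit), the usual follow-up is to replace the BKL with a driver-local mutex, roughly:

/* Illustrative follow-up only; cosa_mutex is hypothetical, while
 * cosa_ioctl_common() and struct channel_data come from the driver. */
static DEFINE_MUTEX(cosa_mutex);

static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct channel_data *channel = file->private_data;
	long ret;

	mutex_lock(&cosa_mutex);
	ret = cosa_ioctl_common(channel->cosa, channel, cmd, arg);
	mutex_unlock(&cosa_mutex);
	return ret;
}
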
 
 
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 15d353f268b5..421d0715310e 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -77,7 +77,7 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
 	dlp = netdev_priv(dev);
 
 	hdr.control = FRAD_I_UI;
-	switch(type)
+	switch (type)
 	{
 		case ETH_P_IP:
 			hdr.IP_NLPID = FRAD_P_IP;
@@ -130,7 +130,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
 		dev->stats.rx_errors++;
 	}
 	else
-		switch(hdr->IP_NLPID)
+		switch (hdr->IP_NLPID)
 		{
 			case FRAD_P_PADDING:
 				if (hdr->NLPID != FRAD_P_SNAP)
@@ -208,7 +208,7 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
 
 	if (!get)
 	{
-		if(copy_from_user(&config, conf, sizeof(struct dlci_conf)))
+		if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
 			return -EFAULT;
 		if (config.flags & ~DLCI_VALID_FLAGS)
 			return(-EINVAL);
@@ -222,7 +222,7 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
 
 	if (get)
 	{
-		if(copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
+		if (copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
 			return -EFAULT;
 	}
 
@@ -238,7 +238,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 	dlp = netdev_priv(dev);
 
-	switch(cmd)
+	switch (cmd)
 	{
 		case DLCI_GET_SLAVE:
 			if (!*(short *)(dev->dev_addr))
@@ -417,7 +417,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
 	if (!capable(CAP_NET_ADMIN))
 		return(-EPERM);
 
-	if(copy_from_user(&add, arg, sizeof(struct dlci_add)))
+	if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
 		return -EFAULT;
 
 	switch (cmd)
@@ -426,7 +426,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
 			err = dlci_add(&add);
 
 			if (!err)
-				if(copy_to_user(arg, &add, sizeof(struct dlci_add)))
+				if (copy_to_user(arg, &add, sizeof(struct dlci_add)))
 					return -EFAULT;
 			break;
 
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 07d00b4cf48a..3f759daf3ca4 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1128,7 +1128,7 @@ done:
         init_timer(&dpriv->timer);
         dpriv->timer.expires = jiffies + 10*HZ;
         dpriv->timer.data = (unsigned long)dev;
-        dpriv->timer.function = &dscc4_timer;
+	dpriv->timer.function = dscc4_timer;
         add_timer(&dpriv->timer);
 	netif_carrier_on(dev);
 
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index beda387f2fc7..9bc2e3649157 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1346,8 +1346,8 @@ do_bottom_half_tx(struct fst_card_info *card)
 
 		dev = port_to_dev(port);
 		while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
-			 DMA_OWN)
-		       && !(card->dmatx_in_progress)) {
+			 DMA_OWN) &&
+		       !(card->dmatx_in_progress)) {
 			/*
 			 * There doesn't seem to be a txdone event per-se
 			 * We seem to have to deduce it, by checking the DMA_OWN
@@ -1379,8 +1379,8 @@ do_bottom_half_tx(struct fst_card_info *card)
 				 */
 				FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
 					cnv_bcnt(skb->len));
-				if ((skb->len < FST_MIN_DMA_LEN)
-				    || (card->family == FST_FAMILY_TXP)) {
+				if ((skb->len < FST_MIN_DMA_LEN) ||
+				    (card->family == FST_FAMILY_TXP)) {
 					/* Enqueue the packet with normal io */
 					memcpy_toio(card->mem +
 						    BUF_OFFSET(txBuffer[pi]
@@ -2030,8 +2030,8 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		/* Sanity check the parameters. We don't support partial writes
 		 * when going over the top
 		 */
-		if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE
-		    || wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
+		if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
+		    wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
 			return -ENXIO;
 		}
 
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index cc07236ea734..9937bbab938d 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -57,7 +57,7 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct hdlc_device *hdlc = dev_to_hdlc(dev);
 
-	if (dev_net(dev) != &init_net) {
+	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(skb);
 		return 0;
 	}
@@ -102,7 +102,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
 	unsigned long flags;
 	int on;
 
-	if (dev_net(dev) != &init_net)
+	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
 
 	if (!(dev->priv_flags & IFF_WAN_HDLC))
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 840cff72a0f1..a7d4fc1a03a2 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1214,10 +1214,10 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
 		return 0;
 
 	case IF_PROTO_FR:
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		if(dev->flags & IFF_UP)
+		if (dev->flags & IFF_UP)
 			return -EBUSY;
 
 		if (copy_from_user(&new_settings, fr_s, size))
@@ -1263,7 +1263,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
 		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
 			return -EINVAL;
 
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
 		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 15002c3d0d95..74164d29524c 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -218,7 +218,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
 	/* We want a fast IRQ for this device. Actually we'd like an even faster
 	   IRQ ;) - This is one driver RtLinux is made for */
 
-	if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
+	if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
 			"Hostess SV11", sv) < 0) {
 		printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
 		goto err_irq;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 7ea71b33d2e9..2ebe935d1058 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1028,7 +1028,7 @@ static int lmc_open(struct net_device *dev)
     lmc_softreset (sc);
 
     /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
-    if (request_irq (dev->irq, &lmc_interrupt, IRQF_SHARED, dev->name, dev)){
+    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
         printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
         lmc_trace(dev, "lmc_open irq failed out");
         return -EAGAIN;
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 58c66819f39b..5394b51bdb2f 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -376,7 +376,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
 	}
 	card->io = io;
 
-	if (request_irq(irq, &sca_intr, 0, devname, card)) {
+	if (request_irq(irq, sca_intr, 0, devname, card)) {
 		printk(KERN_ERR "n2: could not allocate IRQ\n");
 		n2_destroy_card(card);
 		return(-EBUSY);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 79dabc557bd3..aec4d3955420 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -514,8 +514,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
 				  RX_BD_ADDR(ch, chan->rx_first_bd));
 	while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
 		nchar = cpc_readw(&ptdescr->len);
-		if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT))
-		    || (nchar > BD_DEF_LEN)) {
+		if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT)) ||
+		    (nchar > BD_DEF_LEN)) {
 
 			if (nchar > BD_DEF_LEN)
 				status |= DST_RBIT;
@@ -1428,8 +1428,7 @@ static void falc_update_stats(pc300_t * card, int ch)
 
 		if (((conf->media == IF_IFACE_T1) &&
 		     (cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_LLBAD) &&
-		     (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN)))
-		    ||
+		     (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN))) ||
 		    ((conf->media == IF_IFACE_E1) &&
 		     (cpc_readb(falcbase + F_REG(RSP, ch)) & RSP_LLBAD))) {
 			pfalc->prbs = 2;
@@ -2285,8 +2284,8 @@ static void falc_e1_intr(pc300_t * card, int ch)
 		if (gis & GIS_ISR1) {
 			isr1 = cpc_readb(falcbase + F_REG(FISR1, ch));
 			if (isr1 & FISR1_XMB) {
-				if ((pfalc->xmb_cause & 2)
-				    && pfalc->multiframe_mode) {
+				if ((pfalc->xmb_cause & 2) &&
+				    pfalc->multiframe_mode) {
 					if (cpc_readb (falcbase + F_REG(FRS0, ch)) & 
 									(FRS0_LOS | FRS0_AIS | FRS0_LFA)) {
 						cpc_writeb(falcbase + F_REG(XSP, ch),
@@ -2639,9 +2638,9 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 						!(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_DTR);
 					/* There is no DSR in HD64572 */
 				}
-				if (!arg
-				    || copy_to_user(arg, &pc300status, sizeof(pc300status_t)))
-						return -EINVAL;
+				if (!arg ||
+				    copy_to_user(arg, &pc300status, sizeof(pc300status_t)))
+					return -EINVAL;
 				return 0;
 			}
 
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 1cc24a45f003..25477b5cde47 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -195,9 +195,9 @@ static unsigned int  netcard_portlist[ ] __initdata = {
 static inline int __init
 sbni_isa_probe( struct net_device  *dev )
 {
-	if( dev->base_addr > 0x1ff
-	    &&  request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name )
-	    &&  sbni_probe1( dev, dev->base_addr, dev->irq ) )
+	if( dev->base_addr > 0x1ff &&
+	    request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
+	    sbni_probe1( dev, dev->base_addr, dev->irq ) )
 
 		return  0;
 	else {
@@ -286,8 +286,8 @@ static int __init sbni_init(struct net_device *dev)
 
 	for( i = 0;  netcard_portlist[ i ];  ++i ) {
 		int  ioaddr = netcard_portlist[ i ];
-		if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name )
-		    &&  sbni_probe1( dev, ioaddr, 0 ))
+		if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
+		    sbni_probe1( dev, ioaddr, 0 ))
 			return 0;
 	}
 
@@ -306,9 +306,9 @@ sbni_pci_probe( struct net_device  *dev )
 		unsigned long  pci_ioaddr;
 		u16  subsys;
 
-		if( pdev->vendor != SBNI_PCI_VENDOR
-		    &&  pdev->device != SBNI_PCI_DEVICE )
-				continue;
+		if( pdev->vendor != SBNI_PCI_VENDOR &&
+		    pdev->device != SBNI_PCI_DEVICE )
+			continue;
 
 		pci_ioaddr = pci_resource_start( pdev, 0 );
 		pci_irq_line = pdev->irq;
@@ -977,8 +977,8 @@ check_fhdr( u32  ioaddr,  u32  *framelen,  u32  *frameno,  u32  *ack,
 	*ack = *framelen & FRAME_ACK_MASK;
 	*is_first = (*framelen & FRAME_FIRST) != 0;
 
-	if( (*framelen &= FRAME_LEN_MASK) < 6
-	    ||  *framelen > SBNI_MAX_FRAME - 3 )
+	if( (*framelen &= FRAME_LEN_MASK) < 6 ||
+	    *framelen > SBNI_MAX_FRAME - 3 )
 		return  0;
 
 	value = inb( ioaddr + DAT );
@@ -1173,10 +1173,10 @@ sbni_open( struct net_device  *dev )
 	if( dev->base_addr < 0x400 ) {		/* ISA only */
 		struct net_device  **p = sbni_cards;
 		for( ;  *p  &&  p < sbni_cards + SBNI_MAX_NUM_CARDS;  ++p )
-			if( (*p)->irq == dev->irq
-			    &&  ((*p)->base_addr == dev->base_addr + 4
-				 ||  (*p)->base_addr == dev->base_addr - 4)
-			    &&  (*p)->flags & IFF_UP ) {
+			if( (*p)->irq == dev->irq &&
+			    ((*p)->base_addr == dev->base_addr + 4 ||
+			     (*p)->base_addr == dev->base_addr - 4) &&
+			    (*p)->flags & IFF_UP ) {
 
 				((struct net_local *) (netdev_priv(*p)))
 					->second = dev;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 2b15a7e40d5b..31c41af2246d 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1457,7 +1457,7 @@ got_type:
 	}
 
 	err = -EAGAIN;
-	if (request_irq(dev->irq, &sdla_isr, 0, dev->name, dev)) 
+	if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev)) 
 		goto fail;
 
 	if (flp->type == SDLA_S507) {
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0c525e24b247..61249f489e37 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -84,8 +84,7 @@ static int sealevel_open(struct net_device *d)
 	 *	Link layer up.
 	 */
 
-	switch (unit)
-	{
+	switch (unit) {
 		case 0:
 			err = z8530_sync_dma_open(d, slvl->chan);
 			break;
@@ -133,8 +132,7 @@ static int sealevel_close(struct net_device *d)
 	hdlc_close(d);
 	netif_stop_queue(d);
 
-	switch (unit)
-	{
+	switch (unit) {
 		case 0:
 			z8530_sync_dma_close(d, slvl->chan);
 			break;
@@ -266,7 +264,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
 	/* We want a fast IRQ for this device. Actually we'd like an even faster
 	   IRQ ;) - This is one driver RtLinux is made for */
 
-	if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
+	if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
 			"SeaLevel", dev) < 0) {
 		printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
 		goto err_request_irq;
@@ -342,8 +340,7 @@ static void __exit slvl_shutdown(struct slvl_board *b)
 
 	z8530_shutdown(&b->board);
 
-	for (u = 0; u < 2; u++)
-	{
+	for (u = 0; u < 2; u++) {
 		struct net_device *d = b->dev[u].chan->netdevice;
 		unregister_hdlc_device(d);
 		free_netdev(d);
@@ -391,7 +388,7 @@ static int __init slvl_init_module(void)
 
 static void __exit slvl_cleanup_module(void)
 {
-	if(slvl_unit)
+	if (slvl_unit)
 		slvl_shutdown(slvl_unit);
 }
 
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 27945049c9e1..b9f520b7db6a 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -33,6 +33,7 @@
 #include <linux/lapb.h>
 #include <linux/init.h>
 #include <linux/rtnetlink.h>
+#include <linux/compat.h>
 #include "x25_asy.h"
 
 #include <net/x25device.h>
@@ -656,8 +657,8 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
 
 	switch (s) {
 	case X25_END:
-		if (!test_and_clear_bit(SLF_ERROR, &sl->flags)
-			&& sl->rcount > 2)
+		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+		    sl->rcount > 2)
 			x25_asy_bump(sl);
 		clear_bit(SLF_ESCAPE, &sl->flags);
 		sl->rcount = 0;
@@ -705,6 +706,21 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static long x25_asy_compat_ioctl(struct tty_struct *tty, struct file *file,
+			 unsigned int cmd,  unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCGIFNAME:
+	case SIOCSIFHWADDR:
+		return x25_asy_ioctl(tty, file, cmd,
+				     (unsigned long)compat_ptr(arg));
+	}
+
+	return -ENOIOCTLCMD;
+}
+#endif
+
 static int x25_asy_open_dev(struct net_device *dev)
 {
 	struct x25_asy *sl = netdev_priv(dev);
@@ -754,6 +770,9 @@ static struct tty_ldisc_ops x25_ldisc = {
 	.open		= x25_asy_open_tty,
 	.close		= x25_asy_close_tty,
 	.ioctl		= x25_asy_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= x25_asy_compat_ioctl,
+#endif
 	.receive_buf	= x25_asy_receive_buf,
 	.write_wakeup	= x25_asy_write_wakeup,
 };
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index d623b3d99a4b..3f703384295e 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -31,6 +31,14 @@ config WIMAX_I2400M_SDIO
 
 	  If unsure, it is safe to select M (module).
 
+config WIMAX_IWMC3200_SDIO
+	bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
+	depends on WIMAX_I2400M_SDIO
+	select IWMC3200TOP
+	help
+	  Select if you have a device based on the Intel Multicom WiMAX
+          Connection 3200 over SDIO.
+
 config WIMAX_I2400M_DEBUG_LEVEL
 	int "WiMAX i2400m debug level"
 	depends on WIMAX_I2400M
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 07308686dbcf..944945540391 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -54,7 +54,7 @@
  *   i2400m_set_init_config()
  *   i2400m_cmd_get_state()
  * i2400m_dev_shutdown()        Called by i2400m_dev_stop()
- *   i2400m->bus_reset()
+ *   i2400m_reset()
  *
  * i2400m_{cmd,get,set}_*()
  *   i2400m_msg_to_dev()
@@ -82,6 +82,13 @@
 #define D_SUBMODULE control
 #include "debug-levels.h"
 
+int i2400m_passive_mode;	/* 0 (passive mode disabled) by default */
+module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
+MODULE_PARM_DESC(passive_mode,
+		 "If true, the driver will not do any device setup "
+		 "and leave it up to user space, who must be properly "
+		 "setup.");
+
 
 /*
  * Return if a TLV is of a give type and size
@@ -263,7 +270,7 @@ int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
 
 	if (status == 0)
 		return 0;
-	if (status > ARRAY_SIZE(ms_to_errno)) {
+	if (status >= ARRAY_SIZE(ms_to_errno)) {
 		str = "unknown status code";
 		result = -EBADR;
 	} else {
@@ -336,7 +343,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
 		/* Huh? just in case, shut it down */
 		dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
 			i2400m_state);
-		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+		i2400m_reset(i2400m, I2400M_RT_WARM);
 		break;
 	};
 	d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
@@ -1335,6 +1342,8 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
 	unsigned argc = 0;
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	if (i2400m_passive_mode)
+		goto out_passive;
 	/* Disable idle mode? (enabled by default) */
 	if (i2400m_idle_mode_disabled) {
 		if (i2400m_le_v1_3(i2400m)) {
@@ -1377,6 +1386,7 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
 	result = i2400m_set_init_config(i2400m, args, argc);
 	if (result < 0)
 		goto error;
+out_passive:
 	/*
 	 * Update state: Here it just calls a get state; parsing the
 	 * result (System State TLV and RF Status TLV [done in the rx
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 9b81af3f80a9..b1aec3e1892f 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -214,7 +214,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
 	case I2400M_RT_WARM:
 	case I2400M_RT_COLD:
 	case I2400M_RT_BUS:
-		result = i2400m->bus_reset(i2400m, rt);
+		result = i2400m_reset(i2400m, rt);
 		if (result >= 0)
 			result = 0;
 	default:
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 304f0443ca4b..96a615fe09de 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -41,8 +41,10 @@
  *     __i2400m_dev_start()
  *
  * i2400m_setup()
+ *   i2400m->bus_setup()
  *   i2400m_bootrom_init()
  *   register_netdev()
+ *   wimax_dev_add()
  *   i2400m_dev_start()
  *     __i2400m_dev_start()
  *       i2400m_dev_bootstrap()
@@ -50,15 +52,15 @@
  *       i2400m->bus_dev_start()
  *       i2400m_firmware_check()
  *       i2400m_check_mac_addr()
- *   wimax_dev_add()
  *
  * i2400m_release()
- *   wimax_dev_rm()
  *   i2400m_dev_stop()
  *     __i2400m_dev_stop()
  *       i2400m_dev_shutdown()
  *       i2400m->bus_dev_stop()
  *       i2400m_tx_release()
+ *   i2400m->bus_release()
+ *   wimax_dev_rm()
  *   unregister_netdev()
  */
 #include "i2400m.h"
@@ -66,6 +68,7 @@
 #include <linux/wimax/i2400m.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/suspend.h>
 
 #define D_SUBMODULE driver
 #include "debug-levels.h"
@@ -90,76 +93,39 @@ MODULE_PARM_DESC(power_save_disabled,
 		 "False by default (so the device is told to do power "
 		 "saving).");
 
-/**
- * i2400m_queue_work - schedule work on a i2400m's queue
- *
- * @i2400m: device descriptor
- *
- * @fn: function to run to execute work. It gets passed a 'struct
- *     work_struct' that is wrapped in a 'struct i2400m_work'. Once
- *     done, you have to (1) i2400m_put(i2400m_work->i2400m) and then
- *     (2) kfree(i2400m_work).
- *
- * @gfp_flags: GFP flags for memory allocation.
- *
- * @pl: pointer to a payload buffer that you want to pass to the _work
- *     function. Use this to pack (for example) a struct with extra
- *     arguments.
- *
- * @pl_size: size of the payload buffer.
- *
- * We do this quite often, so this just saves typing; allocate a
- * wrapper for a i2400m, get a ref to it, pack arguments and launch
- * the work.
- *
- * A usual workflow is:
- *
- * struct my_work_args {
- *         void *something;
- *         int whatever;
- * };
- * ...
- *
- * struct my_work_args my_args = {
- *         .something = FOO,
- *         .whaetever = BLAH
- * };
- * i2400m_queue_work(i2400m, 1, my_work_function, GFP_KERNEL,
- *                   &args, sizeof(args))
- *
- * And now the work function can unpack the arguments and call the
- * real function (or do the job itself):
- *
- * static
- * void my_work_fn((struct work_struct *ws)
- * {
- *         struct i2400m_work *iw =
- *	           container_of(ws, struct i2400m_work, ws);
- *	   struct my_work_args *my_args = (void *) iw->pl;
- *
- *	   my_work(iw->i2400m, my_args->something, my_args->whatevert);
- * }
- */
-int i2400m_queue_work(struct i2400m *i2400m,
-		      void (*fn)(struct work_struct *), gfp_t gfp_flags,
-		      const void *pl, size_t pl_size)
+static char i2400m_debug_params[128];
+module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
+		    0644);
+MODULE_PARM_DESC(debug,
+		 "String of space-separated NAME:VALUE pairs, where NAMEs "
+		 "are the different debug submodules and VALUE are the "
+		 "initial debug value to set.");
+
+static char i2400m_barkers_params[128];
+module_param_string(barkers, i2400m_barkers_params,
+		    sizeof(i2400m_barkers_params), 0644);
+MODULE_PARM_DESC(barkers,
+		 "String of comma-separated 32-bit values; each is "
+		 "recognized as the value the device sends as a reboot "
+		 "signal; values are appended to a list--setting one value "
+		 "as zero cleans the existing list and starts a new one.");
+
+static
+struct i2400m_work *__i2400m_work_setup(
+	struct i2400m *i2400m, void (*fn)(struct work_struct *),
+	gfp_t gfp_flags, const void *pl, size_t pl_size)
 {
-	int result;
 	struct i2400m_work *iw;
 
-	BUG_ON(i2400m->work_queue == NULL);
-	result = -ENOMEM;
 	iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
 	if (iw == NULL)
-		goto error_kzalloc;
+		return NULL;
 	iw->i2400m = i2400m_get(i2400m);
+	iw->pl_size = pl_size;
 	memcpy(iw->pl, pl, pl_size);
 	INIT_WORK(&iw->ws, fn);
-	result = queue_work(i2400m->work_queue, &iw->ws);
-error_kzalloc:
-	return result;
+	return iw;
 }
-EXPORT_SYMBOL_GPL(i2400m_queue_work);
 
 
 /*
@@ -175,21 +141,19 @@ EXPORT_SYMBOL_GPL(i2400m_queue_work);
  * it should not happen.
  */
 int i2400m_schedule_work(struct i2400m *i2400m,
-			 void (*fn)(struct work_struct *), gfp_t gfp_flags)
+			 void (*fn)(struct work_struct *), gfp_t gfp_flags,
+			 const void *pl, size_t pl_size)
 {
 	int result;
 	struct i2400m_work *iw;
 
 	result = -ENOMEM;
-	iw = kzalloc(sizeof(*iw), gfp_flags);
-	if (iw == NULL)
-		goto error_kzalloc;
-	iw->i2400m = i2400m_get(i2400m);
-	INIT_WORK(&iw->ws, fn);
-	result = schedule_work(&iw->ws);
-	if (result == 0)
-		result = -ENXIO;
-error_kzalloc:
+	iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
+	if (iw != NULL) {
+		result = schedule_work(&iw->ws);
+		if (WARN_ON(result == 0))
+			result = -ENXIO;
+	}
 	return result;
 }
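
The refactor above folds payload handling into __i2400m_work_setup() and drops the long usage comment that the old i2400m_queue_work() carried. The calling pattern survives in i2400m_schedule_work(): the caller packs a small payload, and the work function unpacks it from iw->pl. A hedged sketch of that pattern, modelled on __i2400m_dev_reset_handle() further down (the function names my_reset_fn and my_trigger_reset are illustrative):

/* Illustrative only: pass a pointer-sized payload to deferred work. */
static void my_reset_fn(struct work_struct *ws)
{
	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
	const char *reason;

	memcpy(&reason, iw->pl, sizeof(reason));	/* unpack the payload */
	/* ... act on iw->i2400m, using 'reason' for log messages ... */
	i2400m_put(iw->i2400m);		/* drop the ref taken at setup time */
	kfree(iw);
}

static int my_trigger_reset(struct i2400m *i2400m, const char *reason)
{
	/* the payload is copied by value into the i2400m_work wrapper */
	return i2400m_schedule_work(i2400m, my_reset_fn, GFP_ATOMIC,
				    &reason, sizeof(reason));
}
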
 
@@ -291,7 +255,7 @@ int i2400m_op_reset(struct wimax_dev *wimax_dev)
 	mutex_lock(&i2400m->init_mutex);
 	i2400m->reset_ctx = &ctx;
 	mutex_unlock(&i2400m->init_mutex);
-	result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+	result = i2400m_reset(i2400m, I2400M_RT_WARM);
 	if (result < 0)
 		goto out;
 	result = wait_for_completion_timeout(&ctx.completion, 4*HZ);
@@ -420,9 +384,15 @@ retry:
 		dev_err(dev, "cannot create workqueue\n");
 		goto error_create_workqueue;
 	}
-	result = i2400m->bus_dev_start(i2400m);
-	if (result < 0)
-		goto error_bus_dev_start;
+	if (i2400m->bus_dev_start) {
+		result = i2400m->bus_dev_start(i2400m);
+		if (result < 0)
+			goto error_bus_dev_start;
+	}
+	i2400m->ready = 1;
+	wmb();		/* see i2400m->ready's documentation  */
+	/* process pending reports from the device */
+	queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
 	result = i2400m_firmware_check(i2400m);	/* fw versions ok? */
 	if (result < 0)
 		goto error_fw_check;
@@ -430,8 +400,6 @@ retry:
 	result = i2400m_check_mac_addr(i2400m);
 	if (result < 0)
 		goto error_check_mac_addr;
-	i2400m->ready = 1;
-	wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
 	result = i2400m_dev_initialize(i2400m);
 	if (result < 0)
 		goto error_dev_initialize;
@@ -443,8 +411,12 @@ retry:
 
 error_dev_initialize:
 error_check_mac_addr:
+	i2400m->ready = 0;
+	wmb();		/* see i2400m->ready's documentation  */
+	flush_workqueue(i2400m->work_queue);
 error_fw_check:
-	i2400m->bus_dev_stop(i2400m);
+	if (i2400m->bus_dev_stop)
+		i2400m->bus_dev_stop(i2400m);
 error_bus_dev_start:
 	destroy_workqueue(i2400m->work_queue);
 error_create_workqueue:
@@ -466,11 +438,15 @@ error_bootstrap:
 static
 int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 {
-	int result;
+	int result = 0;
 	mutex_lock(&i2400m->init_mutex);	/* Well, start the device */
-	result = __i2400m_dev_start(i2400m, bm_flags);
-	if (result >= 0)
-		i2400m->updown = 1;
+	if (i2400m->updown == 0) {
+		result = __i2400m_dev_start(i2400m, bm_flags);
+		if (result >= 0) {
+			i2400m->updown = 1;
+			wmb();	/* see i2400m->updown's documentation */
+		}
+	}
 	mutex_unlock(&i2400m->init_mutex);
 	return result;
 }
@@ -495,9 +471,20 @@ void __i2400m_dev_stop(struct i2400m *i2400m)
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
 	wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+	i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
+	complete(&i2400m->msg_completion);
+	i2400m_net_wake_stop(i2400m);
 	i2400m_dev_shutdown(i2400m);
-	i2400m->ready = 0;
-	i2400m->bus_dev_stop(i2400m);
+	/*
+	 * Make sure no report hooks are running *before* we stop the
+	 * communication infrastructure with the device.
+	 */
+	i2400m->ready = 0;	/* nobody can queue work anymore */
+	wmb();		/* see i2400m->ready's documentation  */
+	flush_workqueue(i2400m->work_queue);
+
+	if (i2400m->bus_dev_stop)
+		i2400m->bus_dev_stop(i2400m);
 	destroy_workqueue(i2400m->work_queue);
 	i2400m_rx_release(i2400m);
 	i2400m_tx_release(i2400m);
@@ -518,12 +505,139 @@ void i2400m_dev_stop(struct i2400m *i2400m)
 	if (i2400m->updown) {
 		__i2400m_dev_stop(i2400m);
 		i2400m->updown = 0;
+		wmb();	/* see i2400m->updown's documentation  */
 	}
 	mutex_unlock(&i2400m->init_mutex);
 }
 
 
 /*
+ * Listen to PM events to cache the firmware before suspend/hibernation
+ *
+ * When the device comes out of suspend, it might go into reset and
+ * firmware has to be uploaded again. At resume, most of the times, we
+ * can't load firmware images from disk, so we need to cache it.
+ *
+ * i2400m_fw_cache() will allocate a kobject and attach the firmware
+ * to it; that way we don't have to worry too much about the fw loader
+ * hitting a race condition.
+ *
+ * Note: modus operandi stolen from the Orinoco driver; thx.
+ */
+static
+int i2400m_pm_notifier(struct notifier_block *notifier,
+		       unsigned long pm_event,
+		       void *unused)
+{
+	struct i2400m *i2400m =
+		container_of(notifier, struct i2400m, pm_notifier);
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p pm_event %lx)\n", i2400m, pm_event);
+	switch (pm_event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		i2400m_fw_cache(i2400m);
+		break;
+	case PM_POST_RESTORE:
+		/* Restore from hibernation failed. We need to clean
+		 * up in exactly the same way, so fall through. */
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		i2400m_fw_uncache(i2400m);
+		break;
+
+	case PM_RESTORE_PREPARE:
+	default:
+		break;
+	}
+	d_fnend(3, dev, "(i2400m %p pm_event %lx) = void\n", i2400m, pm_event);
+	return NOTIFY_DONE;
+}
+
+
+/*
+ * pre-reset is called before a device is going on reset
+ *
+ * This has to be followed by a call to i2400m_post_reset(), otherwise
+ * bad things might happen.
+ */
+int i2400m_pre_reset(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	d_printf(1, dev, "pre-reset shut down\n");
+
+	result = 0;
+	mutex_lock(&i2400m->init_mutex);
+	if (i2400m->updown) {
+		netif_tx_disable(i2400m->wimax_dev.net_dev);
+		__i2400m_dev_stop(i2400m);
+		result = 0;
+		/* don't set updown to zero -- this way
+		 * post_reset can restore properly */
+	}
+	mutex_unlock(&i2400m->init_mutex);
+	if (i2400m->bus_release)
+		i2400m->bus_release(i2400m);
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_pre_reset);
+
+
+/*
+ * Restore device state after a reset
+ *
+ * Do the work needed after a device reset to bring it up to the same
+ * state as it was before the reset.
+ *
+ * NOTE: this requires i2400m->init_mutex taken
+ */
+int i2400m_post_reset(struct i2400m *i2400m)
+{
+	int result = 0;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	d_printf(1, dev, "post-reset start\n");
+	if (i2400m->bus_setup) {
+		result = i2400m->bus_setup(i2400m);
+		if (result < 0) {
+			dev_err(dev, "bus-specific setup failed: %d\n",
+				result);
+			goto error_bus_setup;
+		}
+	}
+	mutex_lock(&i2400m->init_mutex);
+	if (i2400m->updown) {
+		result = __i2400m_dev_start(
+			i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+		if (result < 0)
+			goto error_dev_start;
+	}
+	mutex_unlock(&i2400m->init_mutex);
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+
+error_dev_start:
+	if (i2400m->bus_release)
+		i2400m->bus_release(i2400m);
+error_bus_setup:
+	/* even if the device was up, it could not be recovered, so we
+	 * mark it as down. */
+	i2400m->updown = 0;
+	wmb();		/* see i2400m->updown's documentation  */
+	mutex_unlock(&i2400m->init_mutex);
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_post_reset);
+
+
+/*
  * The device has rebooted; fix up the device and the driver
  *
  * Tear down the driver communication with the device, reload the
@@ -542,56 +656,69 @@ void i2400m_dev_stop(struct i2400m *i2400m)
  *       _stop()], don't do anything, let it fail and handle it.
  *
  * This function is always run in a thread context
+ *
+ * This function gets passed, as the payload to i2400m_work(), a
+ * 'const char *' pointer with a "reason" why the reset happened (used
+ * in messages).
  */
 static
 void __i2400m_dev_reset_handle(struct work_struct *ws)
 {
 	int result;
 	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+	const char *reason;
 	struct i2400m *i2400m = iw->i2400m;
 	struct device *dev = i2400m_dev(i2400m);
-	enum wimax_st wimax_state;
 	struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
 
-	d_fnstart(3, dev, "(ws %p i2400m %p)\n", ws, i2400m);
+	if (WARN_ON(iw->pl_size != sizeof(reason)))
+		reason = "SW BUG: reason n/a";
+	else
+		memcpy(&reason, iw->pl, sizeof(reason));
+
+	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
+
 	result = 0;
 	if (mutex_trylock(&i2400m->init_mutex) == 0) {
 		/* We are still in i2400m_dev_start() [let it fail] or
 		 * i2400m_dev_stop() [we are shutting down anyway, so
 		 * ignore it] or we are resetting somewhere else. */
-		dev_err(dev, "device rebooted\n");
+		dev_err(dev, "device rebooted somewhere else?\n");
 		i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
 		complete(&i2400m->msg_completion);
 		goto out;
 	}
-	wimax_state = wimax_state_get(&i2400m->wimax_dev);
-	if (wimax_state < WIMAX_ST_UNINITIALIZED) {
-		dev_info(dev, "device rebooted: it is down, ignoring\n");
-		goto out_unlock;	/* ifconfig up/down wasn't called */
+	if (i2400m->updown == 0)  {
+		dev_info(dev, "%s: device is down, doing nothing\n", reason);
+		goto out_unlock;
 	}
-	dev_err(dev, "device rebooted: reinitializing driver\n");
+	dev_err(dev, "%s: reinitializing driver\n", reason);
 	__i2400m_dev_stop(i2400m);
-	i2400m->updown = 0;
 	result = __i2400m_dev_start(i2400m,
 				    I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
 	if (result < 0) {
-		dev_err(dev, "device reboot: cannot start the device: %d\n",
-			result);
-		result = i2400m->bus_reset(i2400m, I2400M_RT_BUS);
-		if (result >= 0)
-			result = -ENODEV;
-	} else
-		i2400m->updown = 1;
+		i2400m->updown = 0;
+		wmb();		/* see i2400m->updown's documentation  */
+		dev_err(dev, "%s: cannot start the device: %d\n",
+			reason, result);
+		result = -EUCLEAN;
+	}
 out_unlock:
 	if (i2400m->reset_ctx) {
 		ctx->result = result;
 		complete(&ctx->completion);
 	}
 	mutex_unlock(&i2400m->init_mutex);
+	if (result == -EUCLEAN) {
+		/* oops, need to clean up [w/ init_mutex not held] */
+		result = i2400m_reset(i2400m, I2400M_RT_BUS);
+		if (result >= 0)
+			result = -ENODEV;
+	}
 out:
 	i2400m_put(i2400m);
 	kfree(iw);
-	d_fnend(3, dev, "(ws %p i2400m %p) = void\n", ws, i2400m);
+	d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
+		ws, i2400m, reason);
 	return;
 }
 
@@ -608,16 +735,104 @@ out:
  * reinitializing the driver to handle the reset, calling into the
  * bus-specific functions ops as needed.
  */
-int i2400m_dev_reset_handle(struct i2400m *i2400m)
+int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
 {
 	i2400m->boot_mode = 1;
 	wmb();		/* Make sure i2400m_msg_to_dev() sees boot_mode */
 	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
-				    GFP_ATOMIC);
+				    GFP_ATOMIC, &reason, sizeof(reason));
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 
 
+/*
+ * Alloc the command and ack buffers for boot mode
+ *
+ * Get the buffers needed to deal with boot mode messages.  These
+ * buffers need to be allocated before the SDIO receive IRQ is set up.
+ */
+static
+int i2400m_bm_buf_alloc(struct i2400m *i2400m)
+{
+	int result;
+
+	result = -ENOMEM;
+	i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
+	if (i2400m->bm_cmd_buf == NULL)
+		goto error_bm_cmd_kzalloc;
+	i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
+	if (i2400m->bm_ack_buf == NULL)
+		goto error_bm_ack_buf_kzalloc;
+	return 0;
+
+error_bm_ack_buf_kzalloc:
+	kfree(i2400m->bm_cmd_buf);
+error_bm_cmd_kzalloc:
+	return result;
+}
+
+
+/*
+ * Free boot mode command and ack buffers.
+ */
+static
+void i2400m_bm_buf_free(struct i2400m *i2400m)
+{
+	kfree(i2400m->bm_ack_buf);
+	kfree(i2400m->bm_cmd_buf);
+}
+
+
+/**
+ * i2400m_init - Initialize a 'struct i2400m' from all zeroes
+ *
+ * This is a bus-generic API call.
+ */
+void i2400m_init(struct i2400m *i2400m)
+{
+	wimax_dev_init(&i2400m->wimax_dev);
+
+	i2400m->boot_mode = 1;
+	i2400m->rx_reorder = 1;
+	init_waitqueue_head(&i2400m->state_wq);
+
+	spin_lock_init(&i2400m->tx_lock);
+	i2400m->tx_pl_min = UINT_MAX;
+	i2400m->tx_size_min = UINT_MAX;
+
+	spin_lock_init(&i2400m->rx_lock);
+	i2400m->rx_pl_min = UINT_MAX;
+	i2400m->rx_size_min = UINT_MAX;
+	INIT_LIST_HEAD(&i2400m->rx_reports);
+	INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work);
+
+	mutex_init(&i2400m->msg_mutex);
+	init_completion(&i2400m->msg_completion);
+
+	mutex_init(&i2400m->init_mutex);
+	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+}
+EXPORT_SYMBOL_GPL(i2400m_init);
+
+
+int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+	/*
+	 * Make sure we stop TXs and down the carrier before
+	 * resetting; this is needed to avoid things like
+	 * i2400m_wake_tx() scheduling stuff in parallel.
+	 */
+	if (net_dev->reg_state == NETREG_REGISTERED) {
+		netif_tx_disable(net_dev);
+		netif_carrier_off(net_dev);
+	}
+	return i2400m->bus_reset(i2400m, rt);
+}
+EXPORT_SYMBOL_GPL(i2400m_reset);
+
+
 /**
  * i2400m_setup - bus-generic setup function for the i2400m device
  *
@@ -625,13 +840,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
  *
  * Returns: 0 if ok, < 0 errno code on error.
  *
- * Initializes the bus-generic parts of the i2400m driver; the
- * bus-specific parts have been initialized, function pointers filled
- * out by the bus-specific probe function.
- *
- * As well, this registers the WiMAX and net device nodes. Once this
- * function returns, the device is operative and has to be ready to
- * receive and send network traffic and WiMAX control operations.
+ * Sets up basic device communication infrastructure, boots the ROM to
+ * read the MAC address, registers with the WiMAX and network stacks
+ * and then brings up the device.
  */
 int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 {
@@ -645,16 +856,21 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 	snprintf(wimax_dev->name, sizeof(wimax_dev->name),
 		 "i2400m-%s:%s", dev->bus->name, dev_name(dev));
 
-	i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
-	if (i2400m->bm_cmd_buf == NULL) {
-		dev_err(dev, "cannot allocate USB command buffer\n");
-		goto error_bm_cmd_kzalloc;
+	result = i2400m_bm_buf_alloc(i2400m);
+	if (result < 0) {
+		dev_err(dev, "cannot allocate bootmode scratch buffers\n");
+		goto error_bm_buf_alloc;
 	}
-	i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
-	if (i2400m->bm_ack_buf == NULL) {
-		dev_err(dev, "cannot allocate USB ack buffer\n");
-		goto error_bm_ack_buf_kzalloc;
+
+	if (i2400m->bus_setup) {
+		result = i2400m->bus_setup(i2400m);
+		if (result < 0) {
+			dev_err(dev, "bus-specific setup failed: %d\n",
+				result);
+			goto error_bus_setup;
+		}
 	}
+
 	result = i2400m_bootrom_init(i2400m, bm_flags);
 	if (result < 0) {
 		dev_err(dev, "read mac addr: bootrom init "
@@ -666,6 +882,9 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 		goto error_read_mac_addr;
 	random_ether_addr(i2400m->src_mac_addr);
 
+	i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
+	register_pm_notifier(&i2400m->pm_notifier);
+
 	result = register_netdev(net_dev);	/* Okey dokey, bring it up */
 	if (result < 0) {
 		dev_err(dev, "cannot register i2400m network device: %d\n",
@@ -674,18 +893,13 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 	}
 	netif_carrier_off(net_dev);
 
-	result = i2400m_dev_start(i2400m, bm_flags);
-	if (result < 0)
-		goto error_dev_start;
-
 	i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user;
 	i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle;
 	i2400m->wimax_dev.op_reset = i2400m_op_reset;
+
 	result = wimax_dev_add(&i2400m->wimax_dev, net_dev);
 	if (result < 0)
 		goto error_wimax_dev_add;
-	/* User space needs to do some init stuff */
-	wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
 
 	/* Now setup all that requires a registered net and wimax device. */
 	result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group);
@@ -693,30 +907,37 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 		dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result);
 		goto error_sysfs_setup;
 	}
+
 	result = i2400m_debugfs_add(i2400m);
 	if (result < 0) {
 		dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
 		goto error_debugfs_setup;
 	}
+
+	result = i2400m_dev_start(i2400m, bm_flags);
+	if (result < 0)
+		goto error_dev_start;
 	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
 	return result;
 
+error_dev_start:
+	i2400m_debugfs_rm(i2400m);
 error_debugfs_setup:
 	sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
 			   &i2400m_dev_attr_group);
 error_sysfs_setup:
 	wimax_dev_rm(&i2400m->wimax_dev);
 error_wimax_dev_add:
-	i2400m_dev_stop(i2400m);
-error_dev_start:
 	unregister_netdev(net_dev);
 error_register_netdev:
+	unregister_pm_notifier(&i2400m->pm_notifier);
 error_read_mac_addr:
 error_bootrom_init:
-	kfree(i2400m->bm_ack_buf);
-error_bm_ack_buf_kzalloc:
-	kfree(i2400m->bm_cmd_buf);
-error_bm_cmd_kzalloc:
+	if (i2400m->bus_release)
+		i2400m->bus_release(i2400m);
+error_bus_setup:
+	i2400m_bm_buf_free(i2400m);
+error_bm_buf_alloc:
 	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
 	return result;
 }
@@ -735,14 +956,17 @@ void i2400m_release(struct i2400m *i2400m)
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
 	netif_stop_queue(i2400m->wimax_dev.net_dev);
 
+	i2400m_dev_stop(i2400m);
+
 	i2400m_debugfs_rm(i2400m);
 	sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
 			   &i2400m_dev_attr_group);
 	wimax_dev_rm(&i2400m->wimax_dev);
-	i2400m_dev_stop(i2400m);
 	unregister_netdev(i2400m->wimax_dev.net_dev);
-	kfree(i2400m->bm_ack_buf);
-	kfree(i2400m->bm_cmd_buf);
+	unregister_pm_notifier(&i2400m->pm_notifier);
+	if (i2400m->bus_release)
+		i2400m->bus_release(i2400m);
+	i2400m_bm_buf_free(i2400m);
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 }
 EXPORT_SYMBOL_GPL(i2400m_release);
@@ -759,6 +983,7 @@ struct d_level D_LEVEL[] = {
 	D_SUBMODULE_DEFINE(netdev),
 	D_SUBMODULE_DEFINE(rfkill),
 	D_SUBMODULE_DEFINE(rx),
+	D_SUBMODULE_DEFINE(sysfs),
 	D_SUBMODULE_DEFINE(tx),
 };
 size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
@@ -767,7 +992,9 @@ size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
 static
 int __init i2400m_driver_init(void)
 {
-	return 0;
+	d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400m_debug_params,
+		       "i2400m.debug");
+	return i2400m_barker_db_init(i2400m_barkers_params);
 }
 module_init(i2400m_driver_init);
 
@@ -776,6 +1003,7 @@ void __exit i2400m_driver_exit(void)
 {
 	/* for scheds i2400m_dev_reset_handle() */
 	flush_scheduled_work();
+	i2400m_barker_db_exit();
 	return;
 }
 module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e81750e54452..64cdfeb299ca 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -40,11 +40,9 @@
  *
  * THE PROCEDURE
  *
- * (this is decribed for USB, but for SDIO is similar)
- *
- * The 2400m works in two modes: boot-mode or normal mode. In boot
- * mode we can execute only a handful of commands targeted at
- * uploading the firmware and launching it.
+ * The 2400m and derived devices work in two modes: boot-mode or
+ * normal mode. In boot mode we can execute only a handful of commands
+ * targeted at uploading the firmware and launching it.
  *
  * The 2400m enters boot mode when it is first connected to the
  * system, when it crashes and when you ask it to reboot. There are
@@ -52,18 +50,26 @@
  * firmwares signed with a certain private key, non-signed takes any
  * firmware. Normal hardware takes only signed firmware.
  *
- * Upon entrance to boot mode, the device sends a few zero length
- * packets (ZLPs) on the notification endpoint, then a reboot barker
- * (4 le32 words with value I2400M_{S,N}BOOT_BARKER). We ack it by
- * sending the same barker on the bulk out endpoint. The device acks
- * with a reboot ack barker (4 le32 words with value 0xfeedbabe) and
- * then the device is fully rebooted. At this point we can upload the
- * firmware.
+ * On boot mode, in USB, we write to the device using the bulk out
+ * endpoint and read from it in the notification endpoint. In SDIO we
+ * talk to it via the write address and read from the read address.
+ *
+ * Upon entrance to boot mode, the device sends (preceded by a few
+ * zero length packets (ZLPs) on the notification endpoint in USB) a
+ * reboot barker (4 le32 words with the same value). We ack it by
+ * sending the same barker to the device. The device acks with a
+ * reboot ack barker (4 le32 words with value I2400M_ACK_BARKER) and
+ * then is fully booted. At this point we can upload the firmware.
+ *
+ * Note that different iterations of the device and EEPROM
+ * configurations will send different [re]boot barkers; these are
+ * collected in i2400m_barker_db along with the firmware
+ * characteristics they require.
  *
  * This process is accomplished by the i2400m_bootrom_init()
  * function. All the device interaction happens through the
  * i2400m_bm_cmd() [boot mode command]. Special return values will
- * indicate if the device resets.
+ * indicate if the device did reset during the process.
  *
  * After this, we read the MAC address and then (if needed)
  * reinitialize the device. We need to read it ahead of time because
@@ -72,11 +78,11 @@
  *
  * We can then upload the firmware file. The file is composed of a BCF
  * header (basic data, keys and signatures) and a list of write
- * commands and payloads. We first upload the header
- * [i2400m_dnload_init()] and then pass the commands and payloads
- * verbatim to the i2400m_bm_cmd() function
- * [i2400m_dnload_bcf()]. Then we tell the device to jump to the new
- * firmware [i2400m_dnload_finalize()].
+ * commands and payloads. Optionally more BCF headers might follow the
+ * main payload. We first upload the header [i2400m_dnload_init()] and
+ * then pass the commands and payloads verbatim to the i2400m_bm_cmd()
+ * function [i2400m_dnload_bcf()]. Then we tell the device to jump to
+ * the new firmware [i2400m_dnload_finalize()].
  *
  * Once firmware is uploaded, we are good to go :)
  *
@@ -99,18 +105,32 @@
  * read an acknowledgement (or an asynchronous notification)
  * from it.
  *
+ * FIRMWARE LOADING
+ *
+ * Note that in some cases we can't just load a firmware file (for
+ * example, when resuming). For those cases we might cache the
+ * firmware file. Thus, when doing the bootstrap, if there is a cached
+ * firmware file, it is used; if not, loading from disk is attempted.
+ *
  * ROADMAP
  *
+ * i2400m_barker_db_init              Called by i2400m_driver_init()
+ *   i2400m_barker_db_add
+ *
+ * i2400m_barker_db_exit              Called by i2400m_driver_exit()
+ *
  * i2400m_dev_bootstrap               Called by __i2400m_dev_start()
  *   request_firmware
- *   i2400m_fw_check
- *   i2400m_fw_dnload
+ *   i2400m_fw_bootstrap
+ *     i2400m_fw_check
+ *       i2400m_fw_hdr_check
+ *     i2400m_fw_dnload
  *   release_firmware
  *
  * i2400m_fw_dnload
  *   i2400m_bootrom_init
  *     i2400m_bm_cmd
- *     i2400m->bus_reset
+ *     i2400m_reset
  *   i2400m_dnload_init
  *     i2400m_dnload_init_signed
  *     i2400m_dnload_init_nonsigned
@@ -125,9 +145,14 @@
  *   i2400m->bus_bm_cmd_send()
  *   i2400m->bus_bm_wait_for_ack
  *   __i2400m_bm_ack_verify
+ *     i2400m_is_boot_barker
  *
  * i2400m_bm_cmd_prepare              Used by bus-drivers to prep
  *                                    commands before sending
+ *
+ * i2400m_pm_notifier                 Called on Power Management events
+ *   i2400m_fw_cache
+ *   i2400m_fw_uncache
  */
 #include <linux/firmware.h>
 #include <linux/sched.h>
@@ -175,6 +200,240 @@ EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
 
 
 /*
+ * Database of known barkers.
+ *
+ * A barker is what the device sends to indicate it is ready to be
+ * bootloaded. Different versions of the device will send different
+ * barkers, and the barker also indicates which kind of firmware the
+ * device expects.
+ */
+static struct i2400m_barker_db {
+	__le32 data[4];
+} *i2400m_barker_db;
+static size_t i2400m_barker_db_used, i2400m_barker_db_size;
+
+
+static
+int i2400m_zrealloc_2x(void **ptr, size_t *_count, size_t el_size,
+		       gfp_t gfp_flags)
+{
+	size_t old_count = *_count,
+		new_count = old_count ? 2 * old_count : 2,
+		old_size = el_size * old_count,
+		new_size = el_size * new_count;
+	void *nptr = krealloc(*ptr, new_size, gfp_flags);
+	if (nptr) {
+		/* zero the other half or the whole thing if old_count
+		 * was zero */
+		if (old_size == 0)
+			memset(nptr, 0, new_size);
+		else
+			memset(nptr + old_size, 0, old_size);
+		*_count = new_count;
+		*ptr = nptr;
+		return 0;
+	} else
+		return -ENOMEM;
+}
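
The helper above grows an array by doubling and zeroes the newly added tail, so
callers always see NULL/zero slots past the used ones. A stand-alone user-space
sketch of the same idea with plain realloc()/memset() (all names here are
illustrative, not driver code):

/* Grow-by-doubling, zero-the-new-tail reallocation, user-space version. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int zrealloc_2x(void **ptr, size_t *count, size_t el_size)
{
	size_t old_count = *count;
	size_t new_count = old_count ? 2 * old_count : 2;
	void *nptr = realloc(*ptr, new_count * el_size);

	if (nptr == NULL)
		return -1;
	/* zero only the newly added slots (everything on first alloc) */
	memset((char *)nptr + old_count * el_size, 0,
	       (new_count - old_count) * el_size);
	*count = new_count;
	*ptr = nptr;
	return 0;
}

int main(void)
{
	unsigned *db = NULL;
	size_t size = 0, used = 0;

	for (unsigned v = 1; v <= 10; v++) {
		if (used >= size &&
		    zrealloc_2x((void **)&db, &size, sizeof(db[0])) < 0)
			return 1;
		db[used++] = v;
	}
	printf("stored %zu entries in %zu zero-initialized slots\n",
	       used, size);
	free(db);
	return 0;
}
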
+
+
+/*
+ * Add a barker to the database
+ *
+ * This cannot be used outside of this module and only at module_init
+ * time. This is to avoid the need to do locking.
+ */
+static
+int i2400m_barker_db_add(u32 barker_id)
+{
+	int result;
+
+	struct i2400m_barker_db *barker;
+	if (i2400m_barker_db_used >= i2400m_barker_db_size) {
+		result = i2400m_zrealloc_2x(
+			(void **) &i2400m_barker_db, &i2400m_barker_db_size,
+			sizeof(i2400m_barker_db[0]), GFP_KERNEL);
+		if (result < 0)
+			return result;
+	}
+	barker = i2400m_barker_db + i2400m_barker_db_used++;
+	barker->data[0] = le32_to_cpu(barker_id);
+	barker->data[1] = le32_to_cpu(barker_id);
+	barker->data[2] = le32_to_cpu(barker_id);
+	barker->data[3] = le32_to_cpu(barker_id);
+	return 0;
+}
+
+
+void i2400m_barker_db_exit(void)
+{
+	kfree(i2400m_barker_db);
+	i2400m_barker_db = NULL;
+	i2400m_barker_db_size = 0;
+	i2400m_barker_db_used = 0;
+}
+
+
+/*
+ * Helper function to add all the known stable barkers to the barker
+ * database.
+ */
+static
+int i2400m_barker_db_known_barkers(void)
+{
+	int result;
+
+	result = i2400m_barker_db_add(I2400M_NBOOT_BARKER);
+	if (result < 0)
+		goto error_add;
+	result = i2400m_barker_db_add(I2400M_SBOOT_BARKER);
+	if (result < 0)
+		goto error_add;
+	result = i2400m_barker_db_add(I2400M_SBOOT_BARKER_6050);
+	if (result < 0)
+		goto error_add;
+error_add:
+	return result;
+}
+
+
+/*
+ * Initialize the barker database
+ *
+ * This can only be used from the module_init function for this
+ * module; this is to avoid the need to do locking.
+ *
+ * @options: command line argument with extra barkers to
+ *     recognize. This is a comma-separated list of 32-bit hex
+ *     numbers. They are appended to the existing list. Setting 0
+ *     cleans the existing list and starts a new one.
+ */
+int i2400m_barker_db_init(const char *_options)
+{
+	int result;
+	char *options = NULL, *options_orig, *token;
+
+	i2400m_barker_db = NULL;
+	i2400m_barker_db_size = 0;
+	i2400m_barker_db_used = 0;
+
+	result = i2400m_barker_db_known_barkers();
+	if (result < 0)
+		goto error_add;
+	/* parse command line options from i2400m.barkers */
+	if (_options != NULL) {
+		unsigned barker;
+
+		options_orig = kstrdup(_options, GFP_KERNEL);
+		if (options_orig == NULL)
+			goto error_parse;
+		options = options_orig;
+
+		while ((token = strsep(&options, ",")) != NULL) {
+			if (*token == '\0')	/* eat joint commas */
+				continue;
+			if (sscanf(token, "%x", &barker) != 1
+			    || barker > 0xffffffff) {
+				printk(KERN_ERR "%s: can't recognize "
+				       "i2400m.barkers value '%s' as "
+				       "a 32-bit number\n",
+				       __func__, token);
+				result = -EINVAL;
+				goto error_parse;
+			}
+			if (barker == 0) {
+				/* clean list and start new */
+				i2400m_barker_db_exit();
+				continue;
+			}
+			result = i2400m_barker_db_add(barker);
+			if (result < 0)
+				goto error_add;
+		}
+		kfree(options_orig);
+	}
+	return 0;
+
+error_parse:
+error_add:
+	kfree(i2400m_barker_db);
+	return result;
+}
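
The i2400m.barkers string is chopped up with strsep() over a writable copy, empty
tokens are skipped, and each token is read as a 32-bit hex number. A stand-alone
user-space sketch of that parsing loop (strsep() is a glibc/BSD extension; the
sample values are made up):

/* Parse a comma-separated list of 32-bit hex values, strsep() style. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_barkers(const char *_options)
{
	char *options_orig, *options, *token;
	unsigned barker;

	options_orig = strdup(_options);	/* strsep() needs a writable copy */
	if (options_orig == NULL)
		return -1;
	options = options_orig;

	while ((token = strsep(&options, ",")) != NULL) {
		if (*token == '\0')		/* eat joint commas */
			continue;
		if (sscanf(token, "%x", &barker) != 1) {
			fprintf(stderr, "can't parse '%s' as 32-bit hex\n",
				token);
			free(options_orig);
			return -1;
		}
		printf("extra barker 0x%08x\n", barker);
	}
	free(options_orig);
	return 0;
}

int main(void)
{
	/* made-up values, just to exercise the parser */
	return parse_barkers("deadbeef,,feedbabe") ? EXIT_FAILURE : EXIT_SUCCESS;
}
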
+
+
+/*
+ * Recognize a boot barker
+ *
+ * @buf: buffer holding the boot barker.
+ * @buf_size: size of the buffer (has to be 16 bytes). It is passed
+ *     here so the function can check it for the caller.
+ *
+ * Note that as a side effect, upon identifying the obtained boot
+ * barker, this function will set i2400m->barker to point to the right
+ * barker database entry. Subsequent calls to the function will result
+ * in verifying that the same type of boot barker is returned when the
+ * device [re]boots (as long as the same device instance is used).
+ *
+ * Return: 0 if @buf matches a known boot barker. -ENOENT if the
+ *     buffer in @buf doesn't match any boot barker in the database or
+ *     -EILSEQ if the buffer doesn't have the right size.
+ */
+int i2400m_is_boot_barker(struct i2400m *i2400m,
+			  const void *buf, size_t buf_size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_barker_db *barker;
+	int i;
+
+	result = -ENOENT;
+	if (buf_size != sizeof(i2400m_barker_db[i].data))
+		return result;
+
+	/* Short circuit if we have already discovered the barker
+	 * associated with the device. */
+	if (i2400m->barker
+	    && !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) {
+		unsigned index = (i2400m->barker - i2400m_barker_db)
+			/ sizeof(*i2400m->barker);
+		d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n",
+			 index, le32_to_cpu(i2400m->barker->data[0]));
+		return 0;
+	}
+
+	for (i = 0; i < i2400m_barker_db_used; i++) {
+		barker = &i2400m_barker_db[i];
+		BUILD_BUG_ON(sizeof(barker->data) != 16);
+		if (memcmp(buf, barker->data, sizeof(barker->data)))
+			continue;
+
+		if (i2400m->barker == NULL) {
+			i2400m->barker = barker;
+			d_printf(1, dev, "boot barker set to #%u/%08x\n",
+				 i, le32_to_cpu(barker->data[0]));
+			if (barker->data[0] == le32_to_cpu(I2400M_NBOOT_BARKER))
+				i2400m->sboot = 0;
+			else
+				i2400m->sboot = 1;
+		} else if (i2400m->barker != barker) {
+			dev_err(dev, "HW inconsistency: device "
+				"reports a different boot barker "
+				"than set (from %08x to %08x)\n",
+				le32_to_cpu(i2400m->barker->data[0]),
+				le32_to_cpu(barker->data[0]));
+			result = -EIO;
+		} else
+			d_printf(2, dev, "boot barker confirmed #%u/%08x\n",
+				 i, le32_to_cpu(barker->data[0]));
+		result = 0;
+		break;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_is_boot_barker);
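
Stripped of driver state, locking and endianness details, the recognition step is a
16-byte memcmp() against the table, remembering the first hit so later reboots can
be checked against the same entry. A rough stand-alone sketch of just that logic
(table values are made up):

/* Match a 16-byte barker against a small table, caching the first hit. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct barker { uint32_t data[4]; };

static const struct barker db[] = {		/* made-up sample barkers */
	{ { 0xcafebabe, 0xcafebabe, 0xcafebabe, 0xcafebabe } },
	{ { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef } },
};
static const struct barker *cached;

static int is_boot_barker(const void *buf, size_t buf_size)
{
	size_t i;

	if (buf_size != sizeof(db[0].data))
		return -1;			/* wrong size */
	/* short-circuit: the same device keeps sending the same barker */
	if (cached && !memcmp(buf, cached->data, sizeof(cached->data)))
		return 0;
	for (i = 0; i < sizeof(db) / sizeof(db[0]); i++) {
		if (memcmp(buf, db[i].data, sizeof(db[i].data)) == 0) {
			cached = &db[i];
			return 0;
		}
	}
	return -1;				/* unknown barker */
}

int main(void)
{
	struct barker probe = {
		{ 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef }
	};

	printf("first: %d, cached: %d\n",
	       is_boot_barker(probe.data, sizeof(probe.data)),
	       is_boot_barker(probe.data, sizeof(probe.data)));
	return 0;
}
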
+
+
+/*
  * Verify the ack data received
  *
  * Given a reply to a boot mode command, chew it and verify everything
@@ -204,20 +463,10 @@ ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
 			opcode, ack_size, sizeof(*ack));
 		goto error_ack_short;
 	}
-	if (ack_size == sizeof(i2400m_NBOOT_BARKER)
-		 && memcmp(ack, i2400m_NBOOT_BARKER, sizeof(*ack)) == 0) {
+	result = i2400m_is_boot_barker(i2400m, ack, ack_size);
+	if (result >= 0) {
 		result = -ERESTARTSYS;
-		i2400m->sboot = 0;
-		d_printf(6, dev, "boot-mode cmd %d: "
-			 "HW non-signed boot barker\n", opcode);
-		goto error_reboot;
-	}
-	if (ack_size == sizeof(i2400m_SBOOT_BARKER)
-		 && memcmp(ack, i2400m_SBOOT_BARKER, sizeof(*ack)) == 0) {
-		result = -ERESTARTSYS;
-		i2400m->sboot = 1;
-		d_printf(6, dev, "boot-mode cmd %d: HW signed reboot barker\n",
-			 opcode);
+		d_printf(6, dev, "boot-mode cmd %d: HW boot barker\n", opcode);
 		goto error_reboot;
 	}
 	if (ack_size == sizeof(i2400m_ACK_BARKER)
@@ -343,7 +592,6 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
 	BUG_ON(i2400m->boot_mode == 0);
 
 	if (cmd != NULL) {		/* send the command */
-		memcpy(i2400m->bm_cmd_buf, cmd, cmd_size);
 		result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
 		if (result < 0)
 			goto error_cmd_send;
@@ -432,8 +680,8 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
  * Download a BCF file's sections to the device
  *
  * @i2400m: device descriptor
- * @bcf: pointer to firmware data (followed by the payloads). Assumed
- *       verified and consistent.
+ * @bcf: pointer to firmware data (first header followed by the
+ *     payloads). Assumed verified and consistent.
  * @bcf_len: length (in bytes) of the @bcf buffer.
  *
  * Returns: < 0 errno code on error or the offset to the jump instruction.
@@ -472,14 +720,17 @@ ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
 			 "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
 			 section, offset, sizeof(*bh) + data_size,
 			 le32_to_cpu(bh->target_addr));
-		if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP) {
-			/* Secure boot needs to stop here */
-			d_printf(5, dev,  "signed jump found @%zu\n", offset);
+		/*
+		 * We look for JUMP cmd from the bootmode header,
+		 * either I2400M_BRH_SIGNED_JUMP for secure boot
+		 * or I2400M_BRH_JUMP for unsecure boot, the last chunk
+		 * should be the bootmode header with JUMP cmd.
+		 */
+		if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP ||
+			i2400m_brh_get_opcode(bh) == I2400M_BRH_JUMP) {
+			d_printf(5, dev,  "jump found @%zu\n", offset);
 			break;
 		}
-		if (offset + section_size == bcf_len)
-			/* Non-secure boot stops here */
-			break;
 		if (offset + section_size > bcf_len) {
 			dev_err(dev, "fw %s: bad section #%zu, "
 				"end (@%zu) beyond EOF (@%zu)\n",
@@ -510,13 +761,30 @@ error_send:
 
 
 /*
+ * Indicate if the device emitted a reboot barker that indicates
+ * "signed boot"
+ */
+static
+unsigned i2400m_boot_is_signed(struct i2400m *i2400m)
+{
+	return likely(i2400m->sboot);
+}
+
+
+/*
  * Do the final steps of uploading firmware
  *
+ * @bcf_hdr: BCF header we are actually using
+ * @bcf: pointer to the firmware image (which matches the first header
+ *     that is followed by the actual payloads).
+ * @offset: [byte] offset into @bcf for the command we need to send.
+ *
  * Depending on the boot mode (signed vs non-signed), different
  * actions need to be taken.
  */
 static
 int i2400m_dnload_finalize(struct i2400m *i2400m,
+			   const struct i2400m_bcf_hdr *bcf_hdr,
 			   const struct i2400m_bcf_hdr *bcf, size_t offset)
 {
 	int ret = 0;
@@ -530,10 +798,14 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
 
 	d_fnstart(3, dev, "offset %zu\n", offset);
 	cmd = (void *) bcf + offset;
-	if (i2400m->sboot == 0) {
+	if (i2400m_boot_is_signed(i2400m) == 0) {
 		struct i2400m_bootrom_header jump_ack;
 		d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
 			le32_to_cpu(cmd->target_addr));
+		cmd_buf = i2400m->bm_cmd_buf;
+		memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
+		cmd = &cmd_buf->cmd;
+		/* now cmd points to the actual bootrom_header in cmd_buf */
 		i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
 		cmd->data_size = 0;
 		ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
@@ -544,12 +816,13 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
 		cmd_buf = i2400m->bm_cmd_buf;
 		memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
 		signature_block_offset =
-			sizeof(*bcf)
-			+ le32_to_cpu(bcf->key_size) * sizeof(u32)
-			+ le32_to_cpu(bcf->exponent_size) * sizeof(u32);
+			sizeof(*bcf_hdr)
+			+ le32_to_cpu(bcf_hdr->key_size) * sizeof(u32)
+			+ le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32);
 		signature_block_size =
-			le32_to_cpu(bcf->modulus_size) * sizeof(u32);
-		memcpy(cmd_buf->cmd_pl, (void *) bcf + signature_block_offset,
+			le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32);
+		memcpy(cmd_buf->cmd_pl,
+		       (void *) bcf_hdr + signature_block_offset,
 		       signature_block_size);
 		ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
 				    sizeof(cmd_buf->cmd) + signature_block_size,
@@ -565,7 +838,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
  *
  * @i2400m: device descriptor
  * @flags:
- *      I2400M_BRI_SOFT: a reboot notification has been seen
+ *      I2400M_BRI_SOFT: a reboot barker has been seen
  *          already, so don't wait for it.
  *
  *      I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
@@ -576,17 +849,15 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
  *
  *     < 0 errno code on error, 0 if ok.
  *
- *     i2400m->sboot set to 0 for unsecure boot process, 1 for secure
- *     boot process.
- *
  * Description:
  *
  * Tries hard enough to put the device in boot-mode. There are two
  * main phases to this:
  *
  * a. (1) send a reboot command and (2) get a reboot barker
- * b. (1) ack the reboot sending a reboot barker and (2) getting an
- *        ack barker in return
+ *
+ * b. (1) echo/ack the reboot sending the reboot barker back and (2)
+ *        getting an ack barker in return
  *
  * We want to skip (a) in some cases [soft]. The state machine is
  * horrible, but it is basically: on each phase, send what has to be
@@ -594,6 +865,16 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
  * have to backtrack and retry, so we keep a max tries counter for
  * that.
  *
+ * It sucks because we don't know ahead of time which is going to be
+ * the reboot barker (the device might send different ones depending
+ * on its EEPROM config) and once the device reboots and waits for the
+ * echo/ack reboot barker being sent back, it doesn't understand
+ * anything else. So we can be left at the point where we don't know
+ * what to send to it -- cold reset and bus reset seem to have little
+ * effect. So the function iterates (in this case) through all the
+ * known barkers and tries them all until an ACK is
+ * received. Otherwise, it gives up.
+ *
  * If we get a timeout after sending a warm reset, we do it again.
  */
 int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
@@ -602,10 +883,11 @@ int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_bootrom_header *cmd;
 	struct i2400m_bootrom_header ack;
-	int count = I2400M_BOOT_RETRIES;
+	int count = i2400m->bus_bm_retries;
 	int ack_timeout_cnt = 1;
+	unsigned i;
 
-	BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_NBOOT_BARKER));
+	BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_barker_db[0].data));
 	BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));
 
 	d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
@@ -614,27 +896,59 @@ int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
 	if (flags & I2400M_BRI_SOFT)
 		goto do_reboot_ack;
 do_reboot:
+	ack_timeout_cnt = 1;
 	if (--count < 0)
 		goto error_timeout;
 	d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
 		 count);
 	if ((flags & I2400M_BRI_NO_REBOOT) == 0)
-		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+		i2400m_reset(i2400m, I2400M_RT_WARM);
 	result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
 			       I2400M_BM_CMD_RAW);
 	flags &= ~I2400M_BRI_NO_REBOOT;
 	switch (result) {
 	case -ERESTARTSYS:
+		/*
+		 * at this point, i2400m_bm_cmd(), through
+		 * __i2400m_bm_ack_process(), has updated
+		 * i2400m->barker and we are good to go.
+		 */
 		d_printf(4, dev, "device reboot: got reboot barker\n");
 		break;
 	case -EISCONN:	/* we don't know how it got here...but we follow it */
 		d_printf(4, dev, "device reboot: got ack barker - whatever\n");
 		goto do_reboot;
-	case -ETIMEDOUT:	/* device has timed out, we might be in boot
-				 * mode already and expecting an ack, let's try
-				 * that */
-		dev_info(dev, "warm reset timed out, trying an ack\n");
-		goto do_reboot_ack;
+	case -ETIMEDOUT:
+		/*
+		 * Device has timed out, we might be in boot mode
+		 * already and expecting an ack; if we don't know what
+		 * the barker is, we just send them all. Cold reset
+		 * and bus reset don't work. Beats me.
+		 */
+		if (i2400m->barker != NULL) {
+			dev_err(dev, "device boot: reboot barker timed out, "
+				"trying (set) %08x echo/ack\n",
+				le32_to_cpu(i2400m->barker->data[0]));
+			goto do_reboot_ack;
+		}
+		for (i = 0; i < i2400m_barker_db_used; i++) {
+			struct i2400m_barker_db *barker = &i2400m_barker_db[i];
+			memcpy(cmd, barker->data, sizeof(barker->data));
+			result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+					       &ack, sizeof(ack),
+					       I2400M_BM_CMD_RAW);
+			if (result == -EISCONN) {
+				dev_warn(dev, "device boot: got ack barker "
+					 "after sending echo/ack barker "
+					 "#%d/%08x; rebooting j.i.c.\n",
+					 i, le32_to_cpu(barker->data[0]));
+				flags &= ~I2400M_BRI_NO_REBOOT;
+				goto do_reboot;
+			}
+		}
+		dev_err(dev, "device boot: tried all the echo/acks, could "
+			"not get device to respond; giving up");
+		result = -ESHUTDOWN;
 	case -EPROTO:
 	case -ESHUTDOWN:	/* dev is gone */
 	case -EINTR:		/* user cancelled */
@@ -642,6 +956,7 @@ do_reboot:
 	default:
 		dev_err(dev, "device reboot: error %d while waiting "
 			"for reboot barker - rebooting\n", result);
+		d_dump(1, dev, &ack, result);
 		goto do_reboot;
 	}
 	/* At this point we ack back with 4 REBOOT barkers and expect
@@ -650,12 +965,7 @@ do_reboot:
 	 * notification and report it as -EISCONN. */
 do_reboot_ack:
 	d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
-	if (i2400m->sboot == 0)
-		memcpy(cmd, i2400m_NBOOT_BARKER,
-		       sizeof(i2400m_NBOOT_BARKER));
-	else
-		memcpy(cmd, i2400m_SBOOT_BARKER,
-		       sizeof(i2400m_SBOOT_BARKER));
+	memcpy(cmd, i2400m->barker->data, sizeof(i2400m->barker->data));
 	result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
 			       &ack, sizeof(ack), I2400M_BM_CMD_RAW);
 	switch (result) {
@@ -668,10 +978,8 @@ do_reboot_ack:
 		d_printf(4, dev, "reboot ack: got ack barker - good\n");
 		break;
 	case -ETIMEDOUT:	/* no response, maybe it is the other type? */
-		if (ack_timeout_cnt-- >= 0) {
-			d_printf(4, dev, "reboot ack timedout: "
-				 "trying the other type?\n");
-			i2400m->sboot = !i2400m->sboot;
+		if (ack_timeout_cnt-- < 0) {
+			d_printf(4, dev, "reboot ack timedout: retrying\n");
 			goto do_reboot_ack;
 		} else {
 			dev_err(dev, "reboot ack timedout too long: "
@@ -839,32 +1147,29 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m,
  * (signed or non-signed).
  */
 static
-int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf)
+int i2400m_dnload_init(struct i2400m *i2400m,
+		       const struct i2400m_bcf_hdr *bcf_hdr)
 {
 	int result;
 	struct device *dev = i2400m_dev(i2400m);
-	u32 module_id = le32_to_cpu(bcf->module_id);
 
-	if (i2400m->sboot == 0
-	    && (module_id & I2400M_BCF_MOD_ID_POKES) == 0) {
-		/* non-signed boot process without pokes */
-		result = i2400m_dnload_init_nonsigned(i2400m);
+	if (i2400m_boot_is_signed(i2400m)) {
+		d_printf(1, dev, "signed boot\n");
+		result = i2400m_dnload_init_signed(i2400m, bcf_hdr);
 		if (result == -ERESTARTSYS)
 			return result;
 		if (result < 0)
-			dev_err(dev, "fw %s: non-signed download "
+			dev_err(dev, "firmware %s: signed boot download "
 				"initialization failed: %d\n",
 				i2400m->fw_name, result);
-	} else if (i2400m->sboot == 0
-		 && (module_id & I2400M_BCF_MOD_ID_POKES)) {
-		/* non-signed boot process with pokes, nothing to do */
-		result = 0;
-	} else {		 /* signed boot process */
-		result = i2400m_dnload_init_signed(i2400m, bcf);
+	} else {
+		/* non-signed boot process without pokes */
+		d_printf(1, dev, "non-signed boot\n");
+		result = i2400m_dnload_init_nonsigned(i2400m);
 		if (result == -ERESTARTSYS)
 			return result;
 		if (result < 0)
-			dev_err(dev, "fw %s: signed boot download "
+			dev_err(dev, "firmware %s: non-signed download "
 				"initialization failed: %d\n",
 				i2400m->fw_name, result);
 	}
@@ -873,74 +1178,201 @@ int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf)
 
 
 /*
- * Run quick consistency tests on the firmware file
+ * Run consistency tests on the firmware file and load up headers
  *
  * Check for the firmware being made for the i2400m device,
  * etc...These checks are mostly informative, as the device will make
  * them too; but the driver's response is more informative on what
  * went wrong.
+ *
+ * This will also look at all the headers present on the firmware
+ * file, and update i2400m->fw_bcf_hdr to point to them.
  */
 static
-int i2400m_fw_check(struct i2400m *i2400m,
-		    const struct i2400m_bcf_hdr *bcf,
-		    size_t bcf_size)
+int i2400m_fw_hdr_check(struct i2400m *i2400m,
+			const struct i2400m_bcf_hdr *bcf_hdr,
+			size_t index, size_t offset)
 {
-	int result;
 	struct device *dev = i2400m_dev(i2400m);
+
 	unsigned module_type, header_len, major_version, minor_version,
 		module_id, module_vendor, date, size;
 
-	/* Check hard errors */
-	result = -EINVAL;
-	if (bcf_size < sizeof(*bcf)) {	/* big enough header? */
-		dev_err(dev, "firmware %s too short: "
-			"%zu B vs %zu (at least) expected\n",
-			i2400m->fw_name, bcf_size, sizeof(*bcf));
-		goto error;
-	}
+	module_type = bcf_hdr->module_type;
+	header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
+	major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
+		>> 16;
+	minor_version = le32_to_cpu(bcf_hdr->header_version) & 0x0000ffff;
+	module_id = le32_to_cpu(bcf_hdr->module_id);
+	module_vendor = le32_to_cpu(bcf_hdr->module_vendor);
+	date = le32_to_cpu(bcf_hdr->date);
+	size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
 
-	module_type = bcf->module_type;
-	header_len = sizeof(u32) * le32_to_cpu(bcf->header_len);
-	major_version = le32_to_cpu(bcf->header_version) & 0xffff0000 >> 16;
-	minor_version = le32_to_cpu(bcf->header_version) & 0x0000ffff;
-	module_id = le32_to_cpu(bcf->module_id);
-	module_vendor = le32_to_cpu(bcf->module_vendor);
-	date = le32_to_cpu(bcf->date);
-	size = sizeof(u32) * le32_to_cpu(bcf->size);
-
-	if (bcf_size != size) {		/* annoyingly paranoid */
-		dev_err(dev, "firmware %s: bad size, got "
-			"%zu B vs %u expected\n",
-			i2400m->fw_name, bcf_size, size);
-		goto error;
-	}
+	d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header "
+		 "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n",
+		 i2400m->fw_name, index, offset,
+		 module_type, module_vendor, module_id,
+		 major_version, minor_version, header_len, size, date);
 
-	d_printf(2, dev, "type 0x%x id 0x%x vendor 0x%x; header v%u.%u (%zu B) "
-		 "date %08x (%zu B)\n",
-		 module_type, module_id, module_vendor,
-		 major_version, minor_version, (size_t) header_len,
-		 date, (size_t) size);
+	/* Hard errors */
+	if (major_version != 1) {
+		dev_err(dev, "firmware %s #%zd@%08zx: major header version "
+			"v%u.%u not supported\n",
+			i2400m->fw_name, index, offset,
+			major_version, minor_version);
+		return -EBADF;
+	}
 
 	if (module_type != 6) {		/* built for the right hardware? */
-		dev_err(dev, "bad fw %s: unexpected module type 0x%x; "
-			"aborting\n", i2400m->fw_name, module_type);
-		goto error;
+		dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
+			"type 0x%x; aborting\n",
+			i2400m->fw_name, index, offset,
+			module_type);
+		return -EBADF;
+	}
+
+	if (module_vendor != 0x8086) {
+		dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
+			"vendor 0x%x; aborting\n",
+			i2400m->fw_name, index, offset, module_vendor);
+		return -EBADF;
 	}
 
-	/* Check soft-er errors */
-	result = 0;
-	if (module_vendor != 0x8086)
-		dev_err(dev, "bad fw %s? unexpected vendor 0x%04x\n",
-			i2400m->fw_name, module_vendor);
 	if (date < 0x20080300)
-		dev_err(dev, "bad fw %s? build date too old %08x\n",
-			i2400m->fw_name, date);
-error:
+		dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x "
+			 "too old; unsupported\n",
+			 i2400m->fw_name, index, offset, date);
+	return 0;
+}
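
A side note on the parenthesized major_version computation above: in C, '>>' binds
tighter than '&', so the old 'header_version & 0xffff0000 >> 16' form masked with
the shifted constant rather than shifting the masked value. A tiny stand-alone
demonstration (the header_version value is made up):

/* Operator precedence pitfall behind the major_version fix above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t header_version = 0x00010002;	/* made-up: v1.2 */
	unsigned wrong = header_version & 0xffff0000 >> 16;   /* x & 0xffff */
	unsigned right = (header_version & 0xffff0000) >> 16; /* major */
	unsigned minor = header_version & 0x0000ffff;

	/* prints: wrong major 2, right major 1, minor 2 */
	printf("wrong major %u, right major %u, minor %u\n",
	       wrong, right, minor);
	return 0;
}
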
+
+
+/*
+ * Run consistency tests on the firmware file and load up headers
+ *
+ * Check for the firmware being made for the i2400m device,
+ * etc...These checks are mostly informative, as the device will make
+ * them too; but the driver's response is more informative on what
+ * went wrong.
+ *
+ * This will also look at all the headers present on the firmware
+ * file, and update i2400m->fw_hdrs to point to them.
+ */
+static
+int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	size_t headers = 0;
+	const struct i2400m_bcf_hdr *bcf_hdr;
+	const void *itr, *next, *top;
+	size_t slots = 0, used_slots = 0;
+
+	for (itr = bcf, top = itr + bcf_size;
+	     itr < top;
+	     headers++, itr = next) {
+		size_t leftover, offset, header_len, size;
+
+		leftover = top - itr;
+		offset = itr - (const void *) bcf;
+		if (leftover <= sizeof(*bcf_hdr)) {
+			dev_err(dev, "firmware %s: %zu B left at @%zx, "
+				"not enough for BCF header\n",
+				i2400m->fw_name, leftover, offset);
+			break;
+		}
+		bcf_hdr = itr;
+		/* Only the first header is supposed to be followed by
+		 * payload */
+		header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
+		size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
+		if (headers == 0)
+			next = itr + size;
+		else
+			next = itr + header_len;
+
+		result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset);
+		if (result < 0)
+			continue;
+		if (used_slots + 1 >= slots) {
+			/* +1 -> we need to account for the one we'll
+			 * occupy and at least an extra one for
+			 * always being NULL */
+			result = i2400m_zrealloc_2x(
+				(void **) &i2400m->fw_hdrs, &slots,
+				sizeof(i2400m->fw_hdrs[0]),
+				GFP_KERNEL);
+			if (result < 0)
+				goto error_zrealloc;
+		}
+		i2400m->fw_hdrs[used_slots] = bcf_hdr;
+		used_slots++;
+	}
+	if (headers == 0) {
+		dev_err(dev, "firmware %s: no usable headers found\n",
+			i2400m->fw_name);
+		result = -EBADF;
+	} else
+		result = 0;
+error_zrealloc:
 	return result;
 }
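
In other words, the walk treats the image as variable-length records: the first
record spans its full 'size' field (header plus payloads), later ones only their
'header_len'. A stand-alone sketch of that iteration over a synthetic buffer; the
two-field struct is a simplified stand-in for the real BCF header, and both fields
are in 32-bit words as in the real format:

/* Walk concatenated headers: first one is followed by payload. */
#include <stdio.h>
#include <stdint.h>

struct hdr {
	uint32_t header_len;	/* in 32-bit words */
	uint32_t size;		/* header + payload, in 32-bit words */
};

static void walk(const void *buf, size_t buf_size)
{
	const char *itr = buf, *top = (const char *)buf + buf_size;
	size_t index = 0;

	while (itr < top) {
		size_t leftover = (size_t)(top - itr);
		size_t offset = (size_t)(itr - (const char *)buf);
		const struct hdr *h = (const struct hdr *)itr;
		size_t span;

		if (leftover < sizeof(*h)) {
			fprintf(stderr, "%zu B left at @%zx: too short\n",
				leftover, offset);
			break;
		}
		/* only the first header is followed by payload */
		span = sizeof(uint32_t) *
			(index == 0 ? h->size : h->header_len);
		printf("header #%zu at @%zx spans %zu B\n",
		       index, offset, span);
		if (span == 0 || span > leftover)
			break;
		itr += span;
		index++;
	}
}

int main(void)
{
	/* synthetic image: first header claims 4 words total (16 B,
	 * i.e. itself plus 8 B of payload), second is a bare header */
	uint32_t image[6] = { 2, 4, 0xaaaaaaaa, 0xbbbbbbbb, 2, 2 };

	walk(image, sizeof(image));
	return 0;
}
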
 
 
 /*
+ * Match a barker to a BCF header module ID
+ *
+ * The device sends a barker which tells the firmware loader which
+ * header in the BCF file has to be used. This does the matching.
+ */
+static
+unsigned i2400m_bcf_hdr_match(struct i2400m *i2400m,
+			      const struct i2400m_bcf_hdr *bcf_hdr)
+{
+	u32 barker = le32_to_cpu(i2400m->barker->data[0])
+		& 0x7fffffff;
+	u32 module_id = le32_to_cpu(bcf_hdr->module_id)
+		& 0x7fffffff;	/* high bit used for something else */
+
+	/* special case for 5x50 */
+	if (barker == I2400M_SBOOT_BARKER && module_id == 0)
+		return 1;
+	if (module_id == barker)
+		return 1;
+	return 0;
+}
+
+static
+const struct i2400m_bcf_hdr *i2400m_bcf_hdr_find(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_bcf_hdr **bcf_itr, *bcf_hdr;
+	unsigned i = 0;
+	u32 barker = le32_to_cpu(i2400m->barker->data[0]);
+
+	d_printf(2, dev, "finding BCF header for barker %08x\n", barker);
+	if (barker == I2400M_NBOOT_BARKER) {
+		bcf_hdr = i2400m->fw_hdrs[0];
+		d_printf(1, dev, "using BCF header #%u/%08x for non-signed "
+			 "barker\n", 0, le32_to_cpu(bcf_hdr->module_id));
+		return bcf_hdr;
+	}
+	for (bcf_itr = i2400m->fw_hdrs; *bcf_itr != NULL; bcf_itr++, i++) {
+		bcf_hdr = *bcf_itr;
+		if (i2400m_bcf_hdr_match(i2400m, bcf_hdr)) {
+			d_printf(1, dev, "hit on BCF hdr #%u/%08x\n",
+				 i, le32_to_cpu(bcf_hdr->module_id));
+			return bcf_hdr;
+		} else
+			d_printf(1, dev, "miss on BCF hdr #%u/%08x\n",
+				 i, le32_to_cpu(bcf_hdr->module_id));
+	}
+	dev_err(dev, "cannot find a matching BCF header for barker %08x\n",
+		barker);
+	return NULL;
+}
+
+
+/*
  * Download the firmware to the device
  *
  * @i2400m: device descriptor
@@ -956,14 +1388,16 @@ error:
  */
 static
 int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
-		     size_t bcf_size, enum i2400m_bri flags)
+		     size_t fw_size, enum i2400m_bri flags)
 {
 	int ret = 0;
 	struct device *dev = i2400m_dev(i2400m);
 	int count = i2400m->bus_bm_retries;
+	const struct i2400m_bcf_hdr *bcf_hdr;
+	size_t bcf_size;
 
-	d_fnstart(5, dev, "(i2400m %p bcf %p size %zu)\n",
-		  i2400m, bcf, bcf_size);
+	d_fnstart(5, dev, "(i2400m %p bcf %p fw size %zu)\n",
+		  i2400m, bcf, fw_size);
 	i2400m->boot_mode = 1;
 	wmb();		/* Make sure other readers see it */
 hw_reboot:
@@ -985,13 +1419,28 @@ hw_reboot:
 	 * Initialize the download, push the bytes to the device and
 	 * then jump to the new firmware. Note @ret is passed with the
 	 * offset of the jump instruction to _dnload_finalize()
+	 *
+	 * Note we need to use the BCF header in the firmware image
+	 * that matches the barker that the device sent when it
+	 * rebooted, so it has to be passed along.
 	 */
-	ret = i2400m_dnload_init(i2400m, bcf);	/* Init device's dnload */
+	ret = -EBADF;
+	bcf_hdr = i2400m_bcf_hdr_find(i2400m);
+	if (bcf_hdr == NULL)
+		goto error_bcf_hdr_find;
+
+	ret = i2400m_dnload_init(i2400m, bcf_hdr);
 	if (ret == -ERESTARTSYS)
 		goto error_dev_rebooted;
 	if (ret < 0)
 		goto error_dnload_init;
 
+	/*
+	 * bcf_size refers to one header size plus the fw sections size
+	 * indicated by the header, i.e. if there are other extended headers
+	 * at the tail, they are not counted
+	 */
+	bcf_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
 	ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
 	if (ret == -ERESTARTSYS)
 		goto error_dev_rebooted;
@@ -1001,7 +1450,7 @@ hw_reboot:
 		goto error_dnload_bcf;
 	}
 
-	ret = i2400m_dnload_finalize(i2400m, bcf, ret);
+	ret = i2400m_dnload_finalize(i2400m, bcf_hdr, bcf, ret);
 	if (ret == -ERESTARTSYS)
 		goto error_dev_rebooted;
 	if (ret < 0) {
@@ -1018,10 +1467,11 @@ hw_reboot:
 error_dnload_finalize:
 error_dnload_bcf:
 error_dnload_init:
+error_bcf_hdr_find:
 error_bootrom_init:
 error_too_many_reboots:
 	d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
-		i2400m, bcf, bcf_size, ret);
+		i2400m, bcf, fw_size, ret);
 	return ret;
 
 error_dev_rebooted:
@@ -1031,6 +1481,61 @@ error_dev_rebooted:
 	goto hw_reboot;
 }
 
+static
+int i2400m_fw_bootstrap(struct i2400m *i2400m, const struct firmware *fw,
+			enum i2400m_bri flags)
+{
+	int ret;
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_bcf_hdr *bcf;	/* Firmware data */
+
+	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+	bcf = (void *) fw->data;
+	ret = i2400m_fw_check(i2400m, bcf, fw->size);
+	if (ret >= 0)
+		ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
+	if (ret < 0)
+		dev_err(dev, "%s: cannot use: %d, skipping\n",
+			i2400m->fw_name, ret);
+	kfree(i2400m->fw_hdrs);
+	i2400m->fw_hdrs = NULL;
+	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+	return ret;
+}
+
+
+/* Refcounted container for firmware data */
+struct i2400m_fw {
+	struct kref kref;
+	const struct firmware *fw;
+};
+
+
+static
+void i2400m_fw_destroy(struct kref *kref)
+{
+	struct i2400m_fw *i2400m_fw =
+		container_of(kref, struct i2400m_fw, kref);
+	release_firmware(i2400m_fw->fw);
+	kfree(i2400m_fw);
+}
+
+
+static
+struct i2400m_fw *i2400m_fw_get(struct i2400m_fw *i2400m_fw)
+{
+	if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
+		kref_get(&i2400m_fw->kref);
+	return i2400m_fw;
+}
+
+
+static
+void i2400m_fw_put(struct i2400m_fw *i2400m_fw)
+{
+	kref_put(&i2400m_fw->kref, i2400m_fw_destroy);
+}
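
The cache slot thus holds one of three things: NULL (nothing cached), a live
refcounted container, or the ~0 sentinel meaning "caching failed, don't retry". A
user-space analogue of the get/put discipline, with C11 atomics standing in for
kref (all names are invented for illustration):

/* Refcounted container with a release-on-last-put, plus ~0 sentinel. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdatomic.h>

struct blob {
	atomic_int refs;
	const char *data;	/* stands in for the firmware image */
};

/* sentinel: "caching was attempted and failed; don't retry" */
#define BLOB_UNUSABLE ((struct blob *)(uintptr_t)~0)

static struct blob *blob_create(const char *payload)
{
	struct blob *b = malloc(sizeof(*b));

	if (b != NULL) {
		atomic_init(&b->refs, 1);
		b->data = payload;
	}
	return b;
}

static struct blob *blob_get(struct blob *b)
{
	if (b != NULL && b != BLOB_UNUSABLE)
		atomic_fetch_add(&b->refs, 1);
	return b;
}

static void blob_put(struct blob *b)
{
	if (b == NULL || b == BLOB_UNUSABLE)
		return;
	if (atomic_fetch_sub(&b->refs, 1) == 1) {	/* last reference */
		printf("releasing '%s'\n", b->data);
		free(b);
	}
}

int main(void)
{
	struct blob *cache = blob_create("firmware image");
	struct blob *user;

	if (cache == NULL)
		return 1;
	user = blob_get(cache);	/* a second user takes a reference */
	blob_put(cache);	/* cache slot dropped; blob stays alive */
	blob_put(user);		/* last reference: released here */

	cache = BLOB_UNUSABLE;	/* mark "can't cache" */
	blob_put(cache);	/* safely ignored */
	return 0;
}
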
+
 
 /**
  * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
@@ -1049,42 +1554,109 @@ error_dev_rebooted:
  */
 int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
 {
-	int ret = 0, itr = 0;
+	int ret, itr;
 	struct device *dev = i2400m_dev(i2400m);
-	const struct firmware *fw;
+	struct i2400m_fw *i2400m_fw;
 	const struct i2400m_bcf_hdr *bcf;	/* Firmware data */
+	const struct firmware *fw;
 	const char *fw_name;
 
 	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
 
+	ret = -ENODEV;
+	spin_lock(&i2400m->rx_lock);
+	i2400m_fw = i2400m_fw_get(i2400m->fw_cached);
+	spin_unlock(&i2400m->rx_lock);
+	if (i2400m_fw == (void *) ~0) {
+		dev_err(dev, "can't load firmware now!");
+		goto out;
+	} else if (i2400m_fw != NULL) {
+		dev_info(dev, "firmware %s: loading from cache\n",
+			 i2400m->fw_name);
+		ret = i2400m_fw_bootstrap(i2400m, i2400m_fw->fw, flags);
+		i2400m_fw_put(i2400m_fw);
+		goto out;
+	}
+
 	/* Load firmware files to memory. */
-	itr = 0;
-	while(1) {
+	for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
 		fw_name = i2400m->bus_fw_names[itr];
 		if (fw_name == NULL) {
 			dev_err(dev, "Could not find a usable firmware image\n");
-			ret = -ENOENT;
-			goto error_no_fw;
+			break;
 		}
+		d_printf(1, dev, "trying firmware %s (%d)\n", fw_name, itr);
 		ret = request_firmware(&fw, fw_name, dev);
-		if (ret == 0)
-			break;		/* got it */
-		if (ret < 0)
+		if (ret < 0) {
 			dev_err(dev, "fw %s: cannot load file: %d\n",
 				fw_name, ret);
-		itr++;
+			continue;
+		}
+		i2400m->fw_name = fw_name;
+		ret = i2400m_fw_bootstrap(i2400m, fw, flags);
+		release_firmware(fw);
+		if (ret >= 0)	/* firmware loaded successfully */
+			break;
+		i2400m->fw_name = NULL;
 	}
-
-	bcf = (void *) fw->data;
-	i2400m->fw_name = fw_name;
-	ret = i2400m_fw_check(i2400m, bcf, fw->size);
-	if (ret < 0)
-		goto error_fw_bad;
-	ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
-error_fw_bad:
-	release_firmware(fw);
-error_no_fw:
+out:
 	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
+
+
+void i2400m_fw_cache(struct i2400m *i2400m)
+{
+	int result;
+	struct i2400m_fw *i2400m_fw;
+	struct device *dev = i2400m_dev(i2400m);
+
+	/* if there is anything there, free it -- now, this'd be weird */
+	spin_lock(&i2400m->rx_lock);
+	i2400m_fw = i2400m->fw_cached;
+	spin_unlock(&i2400m->rx_lock);
+	if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) {
+		i2400m_fw_put(i2400m_fw);
+		WARN(1, "%s:%u: cached fw still present?\n",
+		     __func__, __LINE__);
+	}
+
+	if (i2400m->fw_name == NULL) {
+		dev_err(dev, "firmware n/a: can't cache\n");
+		i2400m_fw = (void *) ~0;
+		goto out;
+	}
+
+	i2400m_fw = kzalloc(sizeof(*i2400m_fw), GFP_ATOMIC);
+	if (i2400m_fw == NULL)
+		goto out;
+	kref_init(&i2400m_fw->kref);
+	result = request_firmware(&i2400m_fw->fw, i2400m->fw_name, dev);
+	if (result < 0) {
+		dev_err(dev, "firmware %s: failed to cache: %d\n",
+			i2400m->fw_name, result);
+		kfree(i2400m_fw);
+		i2400m_fw = (void *) ~0;
+	} else
+		dev_info(dev, "firmware %s: cached\n", i2400m->fw_name);
+out:
+	spin_lock(&i2400m->rx_lock);
+	i2400m->fw_cached = i2400m_fw;
+	spin_unlock(&i2400m->rx_lock);
+}
+
+
+void i2400m_fw_uncache(struct i2400m *i2400m)
+{
+	struct i2400m_fw *i2400m_fw;
+
+	spin_lock(&i2400m->rx_lock);
+	i2400m_fw = i2400m->fw_cached;
+	i2400m->fw_cached = NULL;
+	spin_unlock(&i2400m->rx_lock);
+
+	if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
+		i2400m_fw_put(i2400m_fw);
+}
+
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index 9c4e3189f7b5..b9c4bed3b457 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -67,6 +67,7 @@
 
 /* Host-Device interface for SDIO */
 enum {
+	I2400M_SDIO_BOOT_RETRIES = 3,
 	I2400MS_BLK_SIZE = 256,
 	I2400MS_PL_SIZE_MAX = 0x3E00,
 
@@ -77,9 +78,11 @@ enum {
 	I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
 	/* The number of ticks to wait for the device to signal that
 	 * it is ready */
-	I2400MS_INIT_SLEEP_INTERVAL = 10,
+	I2400MS_INIT_SLEEP_INTERVAL = 100,
 	/* How long to wait for the device to settle after reset */
 	I2400MS_SETTLE_TIME = 40,
+	/* The number of msec to wait for IOR after sending IOE */
+	IWMC3200_IOR_TIMEOUT = 10,
 };
 
 
@@ -97,6 +100,14 @@ enum {
  * @tx_workqueue: workqueue used for data TX; we don't use the
  *     system's workqueue as that might cause deadlocks with code in
  *     the bus-generic driver.
+ *
+ * @debugfs_dentry: dentry for the SDIO specific debugfs files
+ *
+ *     Note this value is set to NULL upon destruction; this is
+ *     because some routines use it to determine if we are inside the
+ *     probe() path or some other path. When debugfs is disabled,
+ *     creation sets the dentry to '(void*) -ENODEV', which is valid
+ *     for the test.
  */
 struct i2400ms {
 	struct i2400m i2400m;		/* FIRST! See doc */
@@ -111,6 +122,9 @@ struct i2400ms {
 	wait_queue_head_t bm_wfa_wq;
 	int bm_wait_result;
 	size_t bm_ack_size;
+
+	/* Device is any of the iwmc3200 SKUs */
+	unsigned iwmc3200:1;
 };
 
 
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6f76558b170f..5cc0f279417e 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -88,6 +88,13 @@ struct edc {
 	u16 errorcount;
 };
 
+struct i2400m_endpoint_cfg {
+	unsigned char bulk_out;
+	unsigned char notification;
+	unsigned char reset_cold;
+	unsigned char bulk_in;
+};
+
 static inline void edc_init(struct edc *edc)
 {
 	edc->timestart = jiffies;
@@ -137,15 +144,13 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
 
 /* Host-Device interface for USB */
 enum {
+	I2400M_USB_BOOT_RETRIES = 3,
 	I2400MU_MAX_NOTIFICATION_LEN = 256,
 	I2400MU_BLK_SIZE = 16,
 	I2400MU_PL_SIZE_MAX = 0x3EFF,
 
-	/* Endpoints */
-	I2400MU_EP_BULK_OUT = 0,
-	I2400MU_EP_NOTIFICATION,
-	I2400MU_EP_RESET_COLD,
-	I2400MU_EP_BULK_IN,
+	/* Device IDs */
+	USB_DEVICE_ID_I6050 = 0x0186,
 };
 
 
@@ -215,6 +220,7 @@ struct i2400mu {
 	struct usb_device *usb_dev;
 	struct usb_interface *usb_iface;
 	struct edc urb_edc;		/* Error density counter */
+	struct i2400m_endpoint_cfg endpoint_cfg;
 
 	struct urb *notif_urb;
 	struct task_struct *tx_kthread;
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 60330f313f27..04df9bbe340f 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -117,16 +117,30 @@
  * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The
  * i2400m driver will only register with the WiMAX and network stacks;
  * the only access done to the device is to read the MAC address so we
- * can register a network device. This calls i2400m_dev_start() to
- * load firmware, setup communication with the device and configure it
- * for operation.
+ * can register a network device.
  *
- * At this point, control and data communications are possible.
+ * The high-level call flow is:
+ *
+ * bus_probe()
+ *   i2400m_setup()
+ *     i2400m->bus_setup()
+ *     boot rom initialization / read mac addr
+ *     network / WiMAX stacks registration
+ *     i2400m_dev_start()
+ *       i2400m->bus_dev_start()
+ *       i2400m_dev_initialize()
  *
- * On disconnect/driver unload, the bus-specific disconnect function
- * calls i2400m_release() to undo i2400m_setup(). i2400m_dev_stop()
- * shuts the firmware down and releases resources uses to communicate
- * with the device.
+ * The reverse applies for a disconnect() call:
+ *
+ * bus_disconnect()
+ *   i2400m_release()
+ *     i2400m_dev_stop()
+ *       i2400m_dev_shutdown()
+ *       i2400m->bus_dev_stop()
+ *     network / WiMAX stack unregistration
+ *     i2400m->bus_release()
+ *
+ * At this point, control and data communications are possible.
  *
  * While the device is up, it might reset. The bus-specific driver has
  * to catch that situation and call i2400m_dev_reset_handle() to deal
@@ -148,9 +162,6 @@
 
 /* Misc constants */
 enum {
-	/* Firmware uploading */
-	I2400M_BOOT_RETRIES = 3,
-	I3200_BOOT_RETRIES = 3,
 	/* Size of the Boot Mode Command buffer */
 	I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
 	I2400M_BM_ACK_BUF_SIZE = 256,
@@ -197,6 +208,7 @@ enum i2400m_reset_type {
 
 struct i2400m_reset_ctx;
 struct i2400m_roq;
+struct i2400m_barker_db;
 
 /**
  * struct i2400m - descriptor for an Intel 2400m
@@ -204,27 +216,50 @@ struct i2400m_roq;
  * Members marked with [fill] must be filled out/initialized before
  * calling i2400m_setup().
  *
+ * Note the @bus_setup/@bus_release, @bus_dev_start/@bus_dev_release
+ * call pairs are very much doing almost the same, and depending on
+ * the underlying bus, some stuff has to be put in one or the
+ * other. The idea of setup/release is that they set up the minimal
+ * amount needed for loading firmware, whereas dev_start/stop set up
+ * the rest needed to do full data/control traffic.
+ *
  * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
  *     so we have a tx_blk_size variable that the bus layer sets to
  *     tell the engine how much of that we need.
  *
  * @bus_pl_size_max: [fill] Maximum payload size.
  *
- * @bus_dev_start: [fill] Function called by the bus-generic code
- *     [i2400m_dev_start()] to setup the bus-specific communications
- *     to the the device. See LIFE CYCLE above.
+ * @bus_setup: [optional fill] Function called by the bus-generic code
+ *     [i2400m_setup()] to setup the basic bus-specific communications
+ *     to the device needed to load firmware. See LIFE CYCLE above.
  *
  *     NOTE: Doesn't need to upload the firmware, as that is taken
  *     care of by the bus-generic code.
  *
- * @bus_dev_stop: [fill] Function called by the bus-generic code
- *     [i2400m_dev_stop()] to shutdown the bus-specific communications
- *     to the the device. See LIFE CYCLE above.
+ * @bus_release: [optional fill] Function called by the bus-generic
+ *     code [i2400m_release()] to shutdown the basic bus-specific
+ *     communications to the device needed to load firmware. See
+ *     LIFE CYCLE above.
  *
  *     This function does not need to reset the device, just tear down
  *     all the host resources created to  handle communication with
  *     the device.
  *
+ * @bus_dev_start: [optional fill] Function called by the bus-generic
+ *     code [i2400m_dev_start()] to do things needed to start the
+ *     device. See LIFE CYCLE above.
+ *
+ *     NOTE: Doesn't need to upload the firmware, as that is taken
+ *     care of by the bus-generic code.
+ *
+ * @bus_dev_stop: [optional fill] Function called by the bus-generic
+ *     code [i2400m_dev_stop()] to do things needed for stopping the
+ *     device. See LIFE CYCLE above.
+ *
+ *     This function does not need to reset the device, just tear down
+ *     all the host resources created to handle communication with
+ *     the device.
+ *
  * @bus_tx_kick: [fill] Function called by the bus-generic code to let
  *     the bus-specific code know that there is data available in the
  *     TX FIFO for transmission to the device.
@@ -246,6 +281,9 @@ struct i2400m_roq;
  *     process, so it cannot rely on common infrastructure being laid
  *     out.
  *
+ *     IMPORTANT: don't call reset on RT_BUS with i2400m->init_mutex
+ *     held, as the .pre/.post reset handlers will deadlock.
+ *
  * @bus_bm_retries: [fill] How many times shall a firmware upload /
  *     device initialization be retried? Different models of the same
  *     device might need different values, hence it is set by the
@@ -297,6 +335,27 @@ struct i2400m_roq;
  *     force this to be the first field so that we can get from
  *     netdev_priv() the right pointer.
  *
+ * @updown: the device is up and ready for transmitting control and
+ *     data packets. This implies @ready (communication infrastructure
+ *     with the device is ready) and the device's firmware has been
+ *     loaded and the device initialized.
+ *
+ *     Write to it only inside an i2400m->init_mutex protected area,
+ *     followed by a wmb(); issue an rmb() before accessing it
+ *     (unless locked inside i2400m->init_mutex). Read access can be
+ *     this loose [just an rmb()] because the paths that use it also
+ *     do other error checks later on. (A usage sketch follows this
+ *     file's diff.)
+ *
+ * @ready: Communication infrastructure with the device is ready, data
+ *     frames can start to be passed around (this is lighter than
+ *     using the WiMAX state for certain hot paths).
+ *
+ *     Write to it only inside an i2400m->init_mutex protected area,
+ *     followed by a wmb(); issue an rmb() before accessing it
+ *     (unless locked inside i2400m->init_mutex). Read access can be
+ *     this loose [just an rmb()] because the paths that use it also
+ *     do other error checks later on.
+ *
  * @rx_reorder: 1 if RX reordering is enabled; this can only be
  *     set at probe time.
  *
@@ -362,6 +421,13 @@ struct i2400m_roq;
  *     delivered. Then the driver can release them to the host. See
  *     drivers/net/i2400m/rx.c for details.
  *
+ * @rx_reports: reports received from the device that couldn't be
+ *     processed because the driver wasn't ready yet; once it is,
+ *     they are pulled from here and processed.
+ *
+ * @rx_report_ws: Work struct used to kick a scan of the RX reports
+ *     list and process each entry.
+ *
  * @src_mac_addr: MAC address used to make ethernet packets be coming
  *     from. This is generated at i2400m_setup() time and used during
  *     the life cycle of the instance. See i2400m_fake_eth_header().
@@ -422,6 +488,25 @@ struct i2400m_roq;
  *
  * @fw_version: version of the firmware interface, Major.minor,
  *     encoded in the high word and low word (major << 16 | minor).
+ *
+ * @fw_hdrs: NULL terminated array of pointers to the firmware
+ *     headers. This is only available during firmware load time.
+ *
+ * @fw_cached: Used to cache firmware when the system goes to
+ *     suspend/standby/hibernation (as on resume we can't read it). If
+ *     NULL, no firmware was cached; read it from disk. If ~0, no
+ *     firmware file can be read (the system hasn't come out of
+ *     suspend yet and failed to cache one), so abort; otherwise, it
+ *     points to a valid cached firmware to be used. Access to this
+ *     variable is protected by the spinlock i2400m->rx_lock.
+ *
+ * @barker: barker type that the device uses; this is initialized by
+ *     i2400m_is_boot_barker() the first time it is called. It won't
+ *     change during the life cycle of the device; every time a boot
+ *     barker is received, it is just verified to be the same.
+ *
+ * @pm_notifier: used to register for PM events
  */
 struct i2400m {
 	struct wimax_dev wimax_dev;	/* FIRST! See doc */
@@ -429,7 +514,7 @@ struct i2400m {
 	unsigned updown:1;		/* Network device is up or down */
 	unsigned boot_mode:1;		/* is the device in boot mode? */
 	unsigned sboot:1;		/* signed or unsigned fw boot */
-	unsigned ready:1;		/* all probing steps done */
+	unsigned ready:1;		/* Device comm infrastructure ready */
 	unsigned rx_reorder:1;		/* RX reorder is enabled */
 	u8 trace_msg_from_user;		/* echo rx msgs to 'trace' pipe */
 					/* typed u8 so /sys/kernel/debug/u8 can tweak */
@@ -440,8 +525,10 @@ struct i2400m {
 	size_t bus_pl_size_max;
 	unsigned bus_bm_retries;
 
+	int (*bus_setup)(struct i2400m *);
 	int (*bus_dev_start)(struct i2400m *);
 	void (*bus_dev_stop)(struct i2400m *);
+	void (*bus_release)(struct i2400m *);
 	void (*bus_tx_kick)(struct i2400m *);
 	int (*bus_reset)(struct i2400m *, enum i2400m_reset_type);
 	ssize_t (*bus_bm_cmd_send)(struct i2400m *,
@@ -468,6 +555,8 @@ struct i2400m {
 		rx_num, rx_size_acc, rx_size_min, rx_size_max;
 	struct i2400m_roq *rx_roq;	/* not under rx_lock! */
 	u8 src_mac_addr[ETH_HLEN];
+	struct list_head rx_reports;	/* under rx_lock! */
+	struct work_struct rx_report_ws;
 
 	struct mutex msg_mutex;		/* serialize command execution */
 	struct completion msg_completion;
@@ -487,37 +576,12 @@ struct i2400m {
 	struct dentry *debugfs_dentry;
 	const char *fw_name;		/* name of the current firmware image */
 	unsigned long fw_version;	/* version of the firmware interface */
-};
-
+	const struct i2400m_bcf_hdr **fw_hdrs;
+	struct i2400m_fw *fw_cached;	/* protected by rx_lock */
+	struct i2400m_barker_db *barker;
 
-/*
- * Initialize a 'struct i2400m' from all zeroes
- *
- * This is a bus-generic API call.
- */
-static inline
-void i2400m_init(struct i2400m *i2400m)
-{
-	wimax_dev_init(&i2400m->wimax_dev);
-
-	i2400m->boot_mode = 1;
-	i2400m->rx_reorder = 1;
-	init_waitqueue_head(&i2400m->state_wq);
-
-	spin_lock_init(&i2400m->tx_lock);
-	i2400m->tx_pl_min = UINT_MAX;
-	i2400m->tx_size_min = UINT_MAX;
-
-	spin_lock_init(&i2400m->rx_lock);
-	i2400m->rx_pl_min = UINT_MAX;
-	i2400m->rx_size_min = UINT_MAX;
-
-	mutex_init(&i2400m->msg_mutex);
-	init_completion(&i2400m->msg_completion);
-
-	mutex_init(&i2400m->init_mutex);
-	/* wake_tx_ws is initialized in i2400m_tx_setup() */
-}
+	struct notifier_block pm_notifier;
+};
 
 
 /*
@@ -577,6 +641,14 @@ extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
 extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
 extern int i2400m_read_mac_addr(struct i2400m *);
 extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+static inline
+int i2400m_is_d2h_barker(const void *buf)
+{
+	const __le32 *barker = buf;
+	return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
+}
+extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
 
 /* Make/grok boot-rom header commands */
 
@@ -644,6 +716,8 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
 /*
  * Driver / device setup and internal functions
  */
+extern void i2400m_init(struct i2400m *);
+extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
 extern void i2400m_netdev_setup(struct net_device *net_dev);
 extern int i2400m_sysfs_setup(struct device_driver *);
 extern void i2400m_sysfs_release(struct device_driver *);
@@ -654,10 +728,14 @@ extern void i2400m_tx_release(struct i2400m *);
 extern int i2400m_rx_setup(struct i2400m *);
 extern void i2400m_rx_release(struct i2400m *);
 
+extern void i2400m_fw_cache(struct i2400m *);
+extern void i2400m_fw_uncache(struct i2400m *);
+
 extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
 			  const void *, int);
 extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
 			   enum i2400m_cs);
+extern void i2400m_net_wake_stop(struct i2400m *);
 enum i2400m_pt;
 extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
 
@@ -672,14 +750,12 @@ static inline int i2400m_debugfs_add(struct i2400m *i2400m)
 static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
 #endif
 
-/* Called by _dev_start()/_dev_stop() to initialize the device itself */
+/* Initialize/shutdown the device */
 extern int i2400m_dev_initialize(struct i2400m *);
 extern void i2400m_dev_shutdown(struct i2400m *);
 
 extern struct attribute_group i2400m_dev_attr_group;
 
-extern int i2400m_schedule_work(struct i2400m *,
-				void (*)(struct work_struct *), gfp_t);
 
 /* HDI message's payload description handling */
 
@@ -724,7 +800,9 @@ void i2400m_put(struct i2400m *i2400m)
 	dev_put(i2400m->wimax_dev.net_dev);
 }
 
-extern int i2400m_dev_reset_handle(struct i2400m *);
+extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
+extern int i2400m_pre_reset(struct i2400m *);
+extern int i2400m_post_reset(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
@@ -737,20 +815,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
 extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
 extern void i2400m_tx_msg_sent(struct i2400m *);
 
-static const __le32 i2400m_NBOOT_BARKER[4] = {
-	cpu_to_le32(I2400M_NBOOT_BARKER),
-	cpu_to_le32(I2400M_NBOOT_BARKER),
-	cpu_to_le32(I2400M_NBOOT_BARKER),
-	cpu_to_le32(I2400M_NBOOT_BARKER)
-};
-
-static const __le32 i2400m_SBOOT_BARKER[4] = {
-	cpu_to_le32(I2400M_SBOOT_BARKER),
-	cpu_to_le32(I2400M_SBOOT_BARKER),
-	cpu_to_le32(I2400M_SBOOT_BARKER),
-	cpu_to_le32(I2400M_SBOOT_BARKER)
-};
-
 extern int i2400m_power_save_disabled;
 
 /*
@@ -773,10 +837,12 @@ struct device *i2400m_dev(struct i2400m *i2400m)
 struct i2400m_work {
 	struct work_struct ws;
 	struct i2400m *i2400m;
+	size_t pl_size;
 	u8 pl[0];
 };
-extern int i2400m_queue_work(struct i2400m *,
-			     void (*)(struct work_struct *), gfp_t,
+
+extern int i2400m_schedule_work(struct i2400m *,
+				void (*)(struct work_struct *), gfp_t,
 				const void *, size_t);
 
 extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
@@ -789,6 +855,7 @@ extern void i2400m_msg_ack_hook(struct i2400m *,
 				const struct i2400m_l3l4_hdr *, size_t);
 extern void i2400m_report_hook(struct i2400m *,
 			       const struct i2400m_l3l4_hdr *, size_t);
+extern void i2400m_report_hook_work(struct work_struct *);
 extern int i2400m_cmd_enter_powersave(struct i2400m *);
 extern int i2400m_cmd_get_state(struct i2400m *);
 extern int i2400m_cmd_exit_idle(struct i2400m *);
@@ -849,6 +916,12 @@ void __i2400m_msleep(unsigned ms)
 #endif
 }
 
+
+/* module initialization helpers */
+extern int i2400m_barker_db_init(const char *);
+extern void i2400m_barker_db_exit(void);
+
+
 /* Module parameters */
 
 extern int i2400m_idle_mode_disabled;
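The @updown/@ready kerneldoc in the hunk above prescribes a simple ordering discipline: the flags are written only inside an i2400m->init_mutex critical section and followed by wmb(), while lockless readers issue rmb() first and rely on later error checks. Below is a minimal sketch of that discipline, assuming the i2400m definitions above; the example_* helpers are hypothetical and not part of this patch.

/* Illustrative only: mirrors the barrier discipline documented for
 * i2400m->updown/->ready above; the helper names are made up. */
static void example_set_ready(struct i2400m *i2400m, unsigned val)
{
	mutex_lock(&i2400m->init_mutex);
	i2400m->ready = val;	/* publish the new state under the mutex */
	wmb();			/* order the store before lockless readers */
	mutex_unlock(&i2400m->init_mutex);
}

static unsigned example_peek_ready(struct i2400m *i2400m)
{
	rmb();		/* loose read: callers do further error checks */
	return i2400m->ready;
}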
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 796396cb4c82..599aa4eb9baa 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -74,6 +74,7 @@
  */
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+#include <linux/ethtool.h>
 #include "i2400m.h"
 
 
@@ -88,7 +89,10 @@ enum {
 	 * The MTU is 1400 or less
 	 */
 	I2400M_MAX_MTU = 1400,
-	I2400M_TX_TIMEOUT = HZ,
+	/* 20 secs? yep, this is the maximum timeout that the device
+	 * might take to get out of IDLE / negotiate it with the base
+	 * station. We add 1sec for good measure. */
+	I2400M_TX_TIMEOUT = 21 * HZ,
 	I2400M_TX_QLEN = 5,
 };
 
@@ -101,22 +105,19 @@ int i2400m_open(struct net_device *net_dev)
 	struct device *dev = i2400m_dev(i2400m);
 
 	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
-	if (i2400m->ready == 0) {
-		dev_err(dev, "Device is still initializing\n");
-		result = -EBUSY;
-	} else
+	/* Make sure we wait until init is complete... */
+	mutex_lock(&i2400m->init_mutex);
+	if (i2400m->updown)
 		result = 0;
+	else
+		result = -EBUSY;
+	mutex_unlock(&i2400m->init_mutex);
 	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
 		net_dev, i2400m, result);
 	return result;
 }
 
 
-/*
- *
- * On kernel versions where cancel_work_sync() didn't return anything,
- * we rely on wake_tx_skb() being non-NULL.
- */
 static
 int i2400m_stop(struct net_device *net_dev)
 {
@@ -124,21 +125,7 @@ int i2400m_stop(struct net_device *net_dev)
 	struct device *dev = i2400m_dev(i2400m);
 
 	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
-	/* See i2400m_hard_start_xmit(), references are taken there
-	 * and here we release them if the work was still
-	 * pending. Note we can't differentiate work not pending vs
-	 * never scheduled, so the NULL check does that. */
-	if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
-	    && i2400m->wake_tx_skb != NULL) {
-		unsigned long flags;
-		struct sk_buff *wake_tx_skb;
-		spin_lock_irqsave(&i2400m->tx_lock, flags);
-		wake_tx_skb = i2400m->wake_tx_skb;	/* compat help */
-		i2400m->wake_tx_skb = NULL;	/* compat help */
-		spin_unlock_irqrestore(&i2400m->tx_lock, flags);
-		i2400m_put(i2400m);
-		kfree_skb(wake_tx_skb);
-	}
+	i2400m_net_wake_stop(i2400m);
 	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
 	return 0;
 }
@@ -167,6 +154,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
 {
 	int result;
 	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
 	struct device *dev = i2400m_dev(i2400m);
 	struct sk_buff *skb = i2400m->wake_tx_skb;
 	unsigned long flags;
@@ -182,27 +170,36 @@ void i2400m_wake_tx_work(struct work_struct *ws)
 		dev_err(dev, "WAKE&TX: skb dissapeared!\n");
 		goto out_put;
 	}
+	/* If we have, somehow, lost the connection after this was
+	 * queued, don't do anything; the device might have been
+	 * reset or just disconnected. */
+	if (unlikely(!netif_carrier_ok(net_dev)))
+		goto out_kfree;
 	result = i2400m_cmd_exit_idle(i2400m);
 	if (result == -EILSEQ)
 		result = 0;
 	if (result < 0) {
 		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
-			"%d\n", result);
-			goto error;
+			"%d - resetting\n", result);
+		i2400m_reset(i2400m, I2400M_RT_BUS);
+		goto error;
 	}
 	result = wait_event_timeout(i2400m->state_wq,
-				    i2400m->state != I2400M_SS_IDLE, 5 * HZ);
+				    i2400m->state != I2400M_SS_IDLE,
+				    net_dev->watchdog_timeo - HZ/2);
 	if (result == 0)
 		result = -ETIMEDOUT;
 	if (result < 0) {
 		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
-			"%d\n", result);
+			"%d - resetting\n", result);
+		i2400m_reset(i2400m, I2400M_RT_BUS);
 		goto error;
 	}
 	msleep(20);	/* device still needs some time or it drops it */
 	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
-	netif_wake_queue(i2400m->wimax_dev.net_dev);
 error:
+	netif_wake_queue(net_dev);
+out_kfree:
 	kfree_skb(skb);	/* refcount transferred by _hard_start_xmit() */
 out_put:
 	i2400m_put(i2400m);
@@ -229,6 +226,38 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
 }
 
 
+
+/*
+ * Clean up resources acquired during i2400m_net_wake_tx()
+ *
+ * This is called by __i2400m_dev_stop and means we have to make sure
+ * any pending work is flushed from the workqueue.
+ */
+void i2400m_net_wake_stop(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	/* See i2400m_hard_start_xmit(), references are taken there
+	 * and here we release them if the work was still
+	 * pending. Note we can't differentiate work not pending vs
+	 * never scheduled, so the NULL check does that. */
+	if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
+	    && i2400m->wake_tx_skb != NULL) {
+		unsigned long flags;
+		struct sk_buff *wake_tx_skb;
+		spin_lock_irqsave(&i2400m->tx_lock, flags);
+		wake_tx_skb = i2400m->wake_tx_skb;	/* compat help */
+		i2400m->wake_tx_skb = NULL;	/* compat help */
+		spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+		i2400m_put(i2400m);
+		kfree_skb(wake_tx_skb);
+	}
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+	return;
+}
+
+
 /*
  * TX an skb to an idle device
  *
@@ -342,6 +371,20 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
 	int result;
 
 	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+	if (skb_header_cloned(skb)) {
+		/*
+		 * Make tcpdump/wireshark happy -- if they are
+		 * running, the skb is cloned and we will overwrite
+		 * the mac fields in i2400m_tx_prep_header. Expanding
+		 * the head un-shares it, which seems to fix this
+		 * (see the sketch after this file's diff).
+		 */
+		result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (result) {
+			result = NETDEV_TX_BUSY;
+			goto error_expand;
+		}
+	}
+
 	if (i2400m->state == I2400M_SS_IDLE)
 		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
 	else
@@ -352,10 +395,11 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
 		net_dev->stats.tx_packets++;
 		net_dev->stats.tx_bytes += skb->len;
 	}
+	result = NETDEV_TX_OK;
+error_expand:
 	kfree_skb(skb);
-
-	d_fnend(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
-	return NETDEV_TX_OK;
+	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+	return result;
 }
 
 
@@ -559,6 +603,22 @@ static const struct net_device_ops i2400m_netdev_ops = {
 	.ndo_change_mtu = i2400m_change_mtu,
 };
 
+static void i2400m_get_drvinfo(struct net_device *net_dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+
+	strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
+	strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
+	if (net_dev->dev.parent)
+		strncpy(info->bus_info, dev_name(net_dev->dev.parent),
+			sizeof(info->bus_info) - 1);
+}
+
+static const struct ethtool_ops i2400m_ethtool_ops = {
+	.get_drvinfo = i2400m_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
 
 /**
  * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data
@@ -580,6 +640,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
 		   & ~IFF_MULTICAST);
 	net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
 	net_dev->netdev_ops = &i2400m_netdev_ops;
+	net_dev->ethtool_ops = &i2400m_ethtool_ops;
 	d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
 }
 EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
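The skb_header_cloned() check added to i2400m_hard_start_xmit() above applies a general rule: before rewriting header bytes in place (as i2400m_tx_prep_header() does), a cloned header has to be un-shared, otherwise a sniffer holding the clone would see the mangled MAC fields. A hedged sketch of the idiom in isolation; example_prepare_headers() is hypothetical.

/* Un-share an skb header before editing it in place -- the idiom used
 * by the hard_start_xmit change above; this helper is illustrative. */
static int example_prepare_headers(struct sk_buff *skb)
{
	if (skb_header_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;	/* caller decides whether to drop or retry */
	/* ... header bytes can now be modified safely ... */
	return 0;
}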
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 07c32e68909f..e3d2a9de023c 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -158,30 +158,104 @@ struct i2400m_report_hook_args {
 	struct sk_buff *skb_rx;
 	const struct i2400m_l3l4_hdr *l3l4_hdr;
 	size_t size;
+	struct list_head list_node;
 };
 
 
 /*
  * Execute i2400m_report_hook in a workqueue
  *
- * Unpacks arguments from the deferred call, executes it and then
- * drops the references.
+ * Goes over the list of queued reports in i2400m->rx_reports and
+ * processes them.
  *
- * Obvious NOTE: References are needed because we are a separate
- *     thread; otherwise the buffer changes under us because it is
- *     released by the original caller.
+ * NOTE: refcounts on i2400m are not needed because we flush the
+ *     workqueue this runs on (i2400m->work_queue) before destroying
+ *     i2400m.
  */
-static
 void i2400m_report_hook_work(struct work_struct *ws)
 {
-	struct i2400m_work *iw =
-		container_of(ws, struct i2400m_work, ws);
-	struct i2400m_report_hook_args *args = (void *) iw->pl;
-	if (iw->i2400m->ready)
-		i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
-	kfree_skb(args->skb_rx);
-	i2400m_put(iw->i2400m);
-	kfree(iw);
+	struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_report_hook_args *args, *args_next;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	while (1) {
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		list_splice_init(&i2400m->rx_reports, &list);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+		if (list_empty(&list))
+			break;
+		else
+			d_printf(1, dev, "processing queued reports\n");
+		list_for_each_entry_safe(args, args_next, &list, list_node) {
+			d_printf(2, dev, "processing queued report %p\n", args);
+			i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
+			kfree_skb(args->skb_rx);
+			list_del(&args->list_node);
+			kfree(args);
+		}
+	}
+}
+
+
+/*
+ * Flush the list of queued reports
+ */
+static
+void i2400m_report_hook_flush(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_report_hook_args *args, *args_next;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	d_printf(1, dev, "flushing queued reports\n");
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	list_splice_init(&i2400m->rx_reports, &list);
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	list_for_each_entry_safe(args, args_next, &list, list_node) {
+		d_printf(2, dev, "flushing queued report %p\n", args);
+		kfree_skb(args->skb_rx);
+		list_del(&args->list_node);
+		kfree(args);
+	}
+}
+
+
+/*
+ * Queue a report for later processing
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @l3l4_hdr: pointer to the control message
+ * @size: size of the message
+ */
+static
+void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
+			      const void *l3l4_hdr, size_t size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned long flags;
+	struct i2400m_report_hook_args *args;
+
+	args = kzalloc(sizeof(*args), GFP_NOIO);
+	if (args) {
+		args->skb_rx = skb_get(skb_rx);
+		args->l3l4_hdr = l3l4_hdr;
+		args->size = size;
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		list_add_tail(&args->list_node, &i2400m->rx_reports);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+		d_printf(2, dev, "queued report %p\n", args);
+		rmb();		/* see i2400m->ready's documentation  */
+		if (likely(i2400m->ready))	/* only send if up */
+			queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
+	} else  {
+		if (printk_ratelimit())
+			dev_err(dev, "%s:%u: Can't allocate %zu B\n",
+				__func__, __LINE__, sizeof(*args));
+	}
 }
 
 
@@ -295,21 +369,29 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		 msg_type, size);
 	d_dump(2, dev, l3l4_hdr, size);
 	if (msg_type & I2400M_MT_REPORT_MASK) {
-		/* These hooks have to be ran serialized; as well, the
-		 * handling might force the execution of commands, and
-		 * that might cause reentrancy issues with
-		 * bus-specific subdrivers and workqueues. So we run
-		 * it in a separate workqueue. */
-		struct i2400m_report_hook_args args = {
-			.skb_rx = skb_rx,
-			.l3l4_hdr = l3l4_hdr,
-			.size = size
-		};
-		if (unlikely(i2400m->ready == 0))	/* only send if up */
-			return;
-		skb_get(skb_rx);
-		i2400m_queue_work(i2400m, i2400m_report_hook_work,
-				  GFP_KERNEL, &args, sizeof(args));
+		/*
+		 * Process each report
+		 *
+		 * - has to be run serialized as well
+		 *
+		 * - the handling might force the execution of
+		 *   commands. That might cause reentrancy issues with
+		 *   bus-specific subdrivers and workqueues, so we
+		 *   run it in a separate workqueue.
+		 *
+		 * - when the driver is not yet ready to handle them,
+		 *   they are queued and at some point the queue is
+		 *   restarted [NOTE: we can't queue SKBs directly, as
+		 *   this might be a piece of a SKB, not the whole
+		 *   thing, and this is cheaper than cloning the
+		 *   SKB].
+		 *
+		 * Note we don't do refcounting for the device
+		 * structure; this is because before destroying
+		 * 'i2400m', we make sure to flush the
+		 * i2400m->work_queue, so there are no issues.
+		 */
+		i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
 		if (unlikely(i2400m->trace_msg_from_user))
 			wimax_msg(&i2400m->wimax_dev, "echo",
 				  l3l4_hdr, size, GFP_KERNEL);
@@ -363,8 +445,6 @@ void i2400m_rx_trace(struct i2400m *i2400m,
 		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
 		 msg_type, size);
 	d_dump(2, dev, l3l4_hdr, size);
-	if (unlikely(i2400m->ready == 0))	/* only send if up */
-		return;
 	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
 	if (result < 0)
 		dev_err(dev, "error sending trace to userspace: %d\n",
@@ -748,7 +828,7 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
 		dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
 			nsn, lbn, roq->ws);
 		i2400m_roq_log_dump(i2400m, roq);
-		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+		i2400m_reset(i2400m, I2400M_RT_WARM);
 	} else {
 		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
 		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
@@ -814,7 +894,7 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 		dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
 			nsn, sn, roq->ws);
 		i2400m_roq_log_dump(i2400m, roq);
-		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+		i2400m_reset(i2400m, I2400M_RT_WARM);
 	} else {
 		/* if the queue is empty, don't bother as we'd queue
 		 * it and inmediately unqueue it -- just deliver it */
@@ -1194,6 +1274,28 @@ error_msg_hdr_check:
 EXPORT_SYMBOL_GPL(i2400m_rx);
 
 
+void i2400m_unknown_barker(struct i2400m *i2400m,
+			   const void *buf, size_t size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	char prefix[64];
+	const __le32 *barker = buf;
+	dev_err(dev, "RX: HW BUG? unknown barker %08x, "
+		"dropping %zu bytes\n", le32_to_cpu(*barker), size);
+	snprintf(prefix, sizeof(prefix), "%s %s: ",
+		 dev_driver_string(dev), dev_name(dev));
+	if (size > 64) {
+		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+			       8, 4, buf, 64, 0);
+		printk(KERN_ERR "%s... (only first 64 bytes "
+		       "dumped)\n", prefix);
+	} else
+		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+			       8, 4, buf, size, 0);
+}
+EXPORT_SYMBOL(i2400m_unknown_barker);
+
+
 /*
  * Initialize the RX queue and infrastructure
  *
@@ -1261,4 +1363,6 @@ void i2400m_rx_release(struct i2400m *i2400m)
 		kfree(i2400m->rx_roq[0].log);
 		kfree(i2400m->rx_roq);
 	}
+	/* at this point, nothing can be received... */
+	i2400m_report_hook_flush(i2400m);
 }
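The deferred-report machinery added to rx.c follows a producer/consumer pattern: producers append to a spinlock-protected list and kick a work item, and the consumer drains the list with list_splice_init() so the lock is never held while the (potentially command-issuing) report hooks run. A condensed, generic sketch of that drain step follows, with a hypothetical item type and processing hook.

/* Generic splice-and-drain sketch of what i2400m_report_hook_work()
 * does above; struct example_item and example_process() are made up. */
struct example_item {
	struct list_head list_node;
};

static void example_process(struct example_item *item)
{
	/* stand-in for the real report hook */
}

static void example_drain(spinlock_t *lock, struct list_head *pending)
{
	struct example_item *item, *next;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &list);	/* steal the whole list */
	spin_unlock_irqrestore(lock, flags);
	list_for_each_entry_safe(item, next, &list, list_node) {
		example_process(item);	/* lock not held while processing */
		list_del(&item->list_node);
		kfree(item);
	}
}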
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
index 7d6ec0f475f8..8e025418f5be 100644
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ b/drivers/net/wimax/i2400m/sdio-fw.c
@@ -118,7 +118,8 @@ ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
 	if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
 		goto error_too_big;
 
-	memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);	/* Prep command */
+	if (_cmd != i2400m->bm_cmd_buf)
+		memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
 	cmd = i2400m->bm_cmd_buf;
 	if (cmd_size_a > cmd_size)			/* Zero pad space */
 		memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
@@ -177,10 +178,6 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
 	d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
 		  i2400m, ack, ack_size);
 
-	spin_lock(&i2400m->rx_lock);
-	i2400ms->bm_ack_size = -EINPROGRESS;
-	spin_unlock(&i2400m->rx_lock);
-
 	result = wait_event_timeout(i2400ms->bm_wfa_wq,
 				    i2400ms->bm_ack_size != -EINPROGRESS,
 				    2 * HZ);
@@ -199,6 +196,10 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
 		size = min(ack_size, i2400ms->bm_ack_size);
 		memcpy(ack, i2400m->bm_ack_buf, size);
 	}
+	/*
+	 * Always remember to clear bm_ack_size back to -EINPROGRESS
+	 * after the RX data has been processed.
+	 */
 	i2400ms->bm_ack_size = -EINPROGRESS;
 	spin_unlock(&i2400m->rx_lock);
 
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index 321beadf6e47..8adf6c9b6f8f 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -53,6 +53,7 @@
  * i2400ms_irq()
  *   i2400ms_rx()
  *     __i2400ms_rx_get_size()
+ *     i2400m_is_boot_barker()
  *     i2400m_rx()
  *
  * i2400ms_rx_setup()
@@ -138,6 +139,11 @@ void i2400ms_rx(struct i2400ms *i2400ms)
 		ret = rx_size;
 		goto error_get_size;
 	}
+	/*
+	 * Hardware quirk: make sure to clear the INTR status register
+	 * AFTER getting the data transfer size.
+	 */
+	sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
 
 	ret = -ENOMEM;
 	skb = alloc_skb(rx_size, GFP_ATOMIC);
@@ -153,25 +159,34 @@ void i2400ms_rx(struct i2400ms *i2400ms)
 	}
 
 	rmb();	/* make sure we get boot_mode from dev_reset_handle */
-	if (i2400m->boot_mode == 1) {
+	if (unlikely(i2400m->boot_mode == 1)) {
 		spin_lock(&i2400m->rx_lock);
 		i2400ms->bm_ack_size = rx_size;
 		spin_unlock(&i2400m->rx_lock);
 		memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
 		wake_up(&i2400ms->bm_wfa_wq);
-		dev_err(dev, "RX: SDIO boot mode message\n");
+		d_printf(5, dev, "RX: SDIO boot mode message\n");
 		kfree_skb(skb);
-	} else if (unlikely(!memcmp(skb->data, i2400m_NBOOT_BARKER,
-				    sizeof(i2400m_NBOOT_BARKER))
-			    || !memcmp(skb->data, i2400m_SBOOT_BARKER,
-				       sizeof(i2400m_SBOOT_BARKER)))) {
-		ret = i2400m_dev_reset_handle(i2400m);
+		goto out;
+	}
+	ret = -EIO;
+	if (unlikely(rx_size < sizeof(__le32))) {
+		dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
+		goto error_bad_size;
+	}
+	if (likely(i2400m_is_d2h_barker(skb->data))) {
+		skb_put(skb, rx_size);
+		i2400m_rx(i2400m, skb);
+	} else if (unlikely(i2400m_is_boot_barker(i2400m,
+						  skb->data, rx_size))) {
+		ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
 		dev_err(dev, "RX: SDIO reboot barker\n");
 		kfree_skb(skb);
 	} else {
-		skb_put(skb, rx_size);
-		i2400m_rx(i2400m, skb);
+		i2400m_unknown_barker(i2400m, skb->data, rx_size);
+		kfree_skb(skb);
 	}
+out:
 	d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
 	return;
 
@@ -179,6 +194,7 @@ error_memcpy_fromio:
 	kfree_skb(skb);
 error_alloc_skb:
 error_get_size:
+error_bad_size:
 	d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
 	return;
 }
@@ -209,7 +225,6 @@ void i2400ms_irq(struct sdio_func *func)
 		dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
 		goto error_no_irq;
 	}
-	sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
 	i2400ms_rx(i2400ms);
 error_no_irq:
 	d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
@@ -234,6 +249,13 @@ int i2400ms_rx_setup(struct i2400ms *i2400ms)
 	init_waitqueue_head(&i2400ms->bm_wfa_wq);
 	spin_lock(&i2400m->rx_lock);
 	i2400ms->bm_wait_result = -EINPROGRESS;
+	/*
+	 * Before enabling the RX interrupt, make sure bm_ack_size is
+	 * cleared to -EINPROGRESS, which indicates that no RX
+	 * interrupt has happened yet or that the previous one has
+	 * been handled, so we are ready to take a new interrupt.
+	 */
+	i2400ms->bm_ack_size = -EINPROGRESS;
 	spin_unlock(&i2400m->rx_lock);
 
 	sdio_claim_host(func);
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index 5105a5ebc44f..de66d068c9cb 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -149,5 +149,8 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
 
 void i2400ms_tx_release(struct i2400ms *i2400ms)
 {
-	destroy_workqueue(i2400ms->tx_workqueue);
+	if (i2400ms->tx_workqueue) {
+		destroy_workqueue(i2400ms->tx_workqueue);
+		i2400ms->tx_workqueue = NULL;
+	}
 }
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 2981e211e04f..76a50ac02ebb 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -43,18 +43,9 @@
  *     i2400m_release()
  *     free_netdev(net_dev)
  *
- * i2400ms_bus_reset()            Called by i2400m->bus_reset
+ * i2400ms_bus_reset()            Called by i2400m_reset
  *   __i2400ms_reset()
  *     __i2400ms_send_barker()
- *
- * i2400ms_bus_dev_start()        Called by i2400m_dev_start() [who is
- *   i2400ms_tx_setup()           called by i2400m_setup()]
- *   i2400ms_rx_setup()
- *
- * i2400ms_bus_dev_stop()         Called by i2400m_dev_stop() [who is
- *   i2400ms_rx_release()         is called by i2400m_release()]
- *   i2400ms_tx_release()
- *
  */
 
 #include <linux/debugfs.h>
@@ -71,6 +62,14 @@
 static int ioe_timeout = 2;
 module_param(ioe_timeout, int, 0);
 
+static char i2400ms_debug_params[128];
+module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
+		    0644);
+MODULE_PARM_DESC(debug,
+		 "String of space-separated NAME:VALUE pairs, where NAMEs "
+		 "are the different debug submodules and VALUE are the "
+		 "initial debug value to set.");
+
 /* Our firmware file name list */
 static const char *i2400ms_bus_fw_names[] = {
 #define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
@@ -95,17 +94,24 @@ static const struct i2400m_poke_table i2400ms_pokes[] = {
  * when we ask it to explicitly doing). Tries until a timeout is
  * reached.
  *
+ * The @maxtries argument indicates how many times (at most) enabling
+ * the function should be tried. 0 means forever. This acts together
+ * with the timeout (i.e., it stops trying as soon as the maximum
+ * number of tries is reached _or_ the timeout expires).
+ *
  * The reverse of this is...sdio_disable_function()
  *
  * Returns: 0 if the SDIO function was enabled, < 0 errno code on
  *     error (-ENODEV when it was unable to enable the function).
  */
 static
-int i2400ms_enable_function(struct sdio_func *func)
+int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
 {
+	struct sdio_func *func = i2400ms->func;
 	u64 timeout;
 	int err;
 	struct device *dev = &func->dev;
+	unsigned tries = 0;
 
 	d_fnstart(3, dev, "(func %p)\n", func);
 	/* Setup timeout (FIXME: This needs to read the CIS table to
@@ -115,6 +121,14 @@ int i2400ms_enable_function(struct sdio_func *func)
 	err = -ENODEV;
 	while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
 		sdio_claim_host(func);
+		/*
+		 * There is a silicon bug on the IWMC3200, where the
+		 * IOE timeout will cause problems on Moorestown
+		 * platforms (system hang). We explicitly overwrite
+		 * func->enable_timeout here to work around the issue.
+		 */
+		if (i2400ms->iwmc3200)
+			func->enable_timeout = IWMC3200_IOR_TIMEOUT;
 		err = sdio_enable_func(func);
 		if (0 == err) {
 			sdio_release_host(func);
@@ -122,8 +136,11 @@ int i2400ms_enable_function(struct sdio_func *func)
 			goto function_enabled;
 		}
 		d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
-		sdio_disable_func(func);
 		sdio_release_host(func);
+		if (maxtries > 0 && ++tries >= maxtries) {
+			err = -ETIME;
+			break;
+		}
 		msleep(I2400MS_INIT_SLEEP_INTERVAL);
 	}
 	/* If timed out, device is not there yet -- get -ENODEV so
@@ -140,46 +157,99 @@ function_enabled:
 
 
 /*
- * Setup driver resources needed to communicate with the device
+ * Setup minimal device communication infrastructure needed to at
+ * least be able to update the firmware.
  *
- * The fw needs some time to settle, and it was just uploaded,
- * so give it a break first. I'd prefer to just wait for the device to
- * send something, but seems the poking we do to enable SDIO stuff
- * interferes with it, so just give it a break before starting...
+ * Note the ugly trick: if we are in the probe path
+ * (i2400ms->debugfs_dentry == NULL), we only retry function
+ * enablement once, to avoid racing with the iwmc3200 top controller.
  */
 static
-int i2400ms_bus_dev_start(struct i2400m *i2400m)
+int i2400ms_bus_setup(struct i2400m *i2400m)
 {
 	int result;
-	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	struct i2400ms *i2400ms =
+		container_of(i2400m, struct i2400ms, i2400m);
+	struct device *dev = i2400m_dev(i2400m);
 	struct sdio_func *func = i2400ms->func;
-	struct device *dev = &func->dev;
+	int retries;
+
+	sdio_claim_host(func);
+	result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
+	sdio_release_host(func);
+	if (result < 0) {
+		dev_err(dev, "Failed to set block size: %d\n", result);
+		goto error_set_blk_size;
+	}
+
+	if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
+		retries = 1;
+	else
+		retries = 0;
+	result = i2400ms_enable_function(i2400ms, retries);
+	if (result < 0) {
+		dev_err(dev, "Cannot enable SDIO function: %d\n", result);
+		goto error_func_enable;
+	}
 
-	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
-	msleep(200);
 	result = i2400ms_tx_setup(i2400ms);
 	if (result < 0)
 		goto error_tx_setup;
-	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
-	return result;
+	result = i2400ms_rx_setup(i2400ms);
+	if (result < 0)
+		goto error_rx_setup;
+	return 0;
 
-error_tx_setup:
+error_rx_setup:
 	i2400ms_tx_release(i2400ms);
-	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+error_tx_setup:
+	sdio_claim_host(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+error_func_enable:
+error_set_blk_size:
 	return result;
 }
 
 
+/*
+ * Tear down minimal device communication infrastructure needed to at
+ * least be able to update the firmware.
+ */
+static
+void i2400ms_bus_release(struct i2400m *i2400m)
+{
+	struct i2400ms *i2400ms =
+		container_of(i2400m, struct i2400ms, i2400m);
+	struct sdio_func *func = i2400ms->func;
+
+	i2400ms_rx_release(i2400ms);
+	i2400ms_tx_release(i2400ms);
+	sdio_claim_host(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+}
+
+
+/*
+ * Setup driver resources needed to communicate with the device
+ *
+ * The fw needs some time to settle, and it was just uploaded,
+ * so give it a break first. I'd prefer to just wait for the device to
+ * send something, but it seems the poking we do to enable SDIO stuff
+ * interferes with it, so just give it a break before starting...
+ */
 static
-void i2400ms_bus_dev_stop(struct i2400m *i2400m)
+int i2400ms_bus_dev_start(struct i2400m *i2400m)
 {
 	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
 	struct sdio_func *func = i2400ms->func;
 	struct device *dev = &func->dev;
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
-	i2400ms_tx_release(i2400ms);
-	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+	msleep(200);
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
+	return 0;
 }
 
 
@@ -233,18 +303,17 @@ error_kzalloc:
  * Warm reset:
  *
  * The device will be fully reset internally, but won't be
- * disconnected from the USB bus (so no reenumeration will
+ * disconnected from the bus (so no reenumeration will
  * happen). Firmware upload will be neccessary.
  *
- * The device will send a reboot barker in the notification endpoint
- * that will trigger the driver to reinitialize the state
- * automatically from notif.c:i2400m_notification_grok() into
- * i2400m_dev_bootstrap_delayed().
+ * The device will send a reboot barker that will trigger the driver
+ * to reinitialize the state via __i2400m_dev_reset_handle.
  *
- * Cold and bus (USB) reset:
+ *
+ * Cold and bus reset:
  *
  * The device will be fully reset internally, disconnected from the
- * USB bus an a reenumeration will happen. Firmware upload will be
+ * bus and a reenumeration will happen. Firmware upload will be
  * neccessary. Thus, we don't do any locking or struct
  * reinitialization, as we are going to be fully disconnected and
  * reenumerated.
@@ -283,25 +352,13 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
 					       sizeof(i2400m_COLD_BOOT_BARKER));
 	else if (rt == I2400M_RT_BUS) {
 do_bus_reset:
-		/* call netif_tx_disable() before sending IOE disable,
-		 * so that all the tx from network layer are stopped
-		 * while IOE is being reset. Make sure it is called
-		 * only after register_netdev() was issued.
-		 */
-		if (i2400m->wimax_dev.net_dev->reg_state == NETREG_REGISTERED)
-			netif_tx_disable(i2400m->wimax_dev.net_dev);
 
-		i2400ms_rx_release(i2400ms);
-		sdio_claim_host(i2400ms->func);
-		sdio_disable_func(i2400ms->func);
-		sdio_release_host(i2400ms->func);
+		i2400ms_bus_release(i2400m);
 
 		/* Wait for the device to settle */
 		msleep(40);
 
-		result = i2400ms_enable_function(i2400ms->func);
-		if (result >= 0)
-			i2400ms_rx_setup(i2400ms);
+		result = i2400ms_bus_setup(i2400m);
 	} else
 		BUG();
 	if (result < 0 && rt != I2400M_RT_BUS) {
@@ -350,7 +407,7 @@ int i2400ms_debugfs_add(struct i2400ms *i2400ms)
 	int result;
 	struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
 
-	dentry = debugfs_create_dir("i2400m-usb", dentry);
+	dentry = debugfs_create_dir("i2400m-sdio", dentry);
 	result = PTR_ERR(dentry);
 	if (IS_ERR(dentry)) {
 		if (result == -ENODEV)
@@ -367,6 +424,7 @@ int i2400ms_debugfs_add(struct i2400ms *i2400ms)
 
 error:
 	debugfs_remove_recursive(i2400ms->debugfs_dentry);
+	i2400ms->debugfs_dentry = NULL;
 	return result;
 }
 
@@ -425,37 +483,30 @@ int i2400ms_probe(struct sdio_func *func,
 
 	i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
 	i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
+	i2400m->bus_setup = i2400ms_bus_setup;
 	i2400m->bus_dev_start = i2400ms_bus_dev_start;
-	i2400m->bus_dev_stop = i2400ms_bus_dev_stop;
+	i2400m->bus_dev_stop = NULL;
+	i2400m->bus_release = i2400ms_bus_release;
 	i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
 	i2400m->bus_reset = i2400ms_bus_reset;
 	/* The iwmc3200-wimax sometimes requires the driver to try
 	 * hard when we paint it into a corner. */
-	i2400m->bus_bm_retries = I3200_BOOT_RETRIES;
+	i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
 	i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
 	i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
 	i2400m->bus_fw_names = i2400ms_bus_fw_names;
 	i2400m->bus_bm_mac_addr_impaired = 1;
 	i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
 
-	sdio_claim_host(func);
-	result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
-	sdio_release_host(func);
-	if (result < 0) {
-		dev_err(dev, "Failed to set block size: %d\n", result);
-		goto error_set_blk_size;
-	}
-
-	result = i2400ms_enable_function(i2400ms->func);
-	if (result < 0) {
-		dev_err(dev, "Cannot enable SDIO function: %d\n", result);
-		goto error_func_enable;
+	switch (func->device) {
+	case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
+	case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
+		i2400ms->iwmc3200 = 1;
+		break;
+	default:
+		i2400ms->iwmc3200 = 0;
 	}
 
-	result = i2400ms_rx_setup(i2400ms);
-	if (result < 0)
-		goto error_rx_setup;
-
 	result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
 	if (result < 0) {
 		dev_err(dev, "cannot setup device: %d\n", result);
@@ -473,13 +524,6 @@ int i2400ms_probe(struct sdio_func *func,
 error_debugfs_add:
 	i2400m_release(i2400m);
 error_setup:
-	i2400ms_rx_release(i2400ms);
-error_rx_setup:
-	sdio_claim_host(func);
-	sdio_disable_func(func);
-	sdio_release_host(func);
-error_func_enable:
-error_set_blk_size:
 	sdio_set_drvdata(func, NULL);
 	free_netdev(net_dev);
 error_alloc_netdev:
@@ -497,12 +541,9 @@ void i2400ms_remove(struct sdio_func *func)
 
 	d_fnstart(3, dev, "SDIO func %p\n", func);
 	debugfs_remove_recursive(i2400ms->debugfs_dentry);
-	i2400ms_rx_release(i2400ms);
+	i2400ms->debugfs_dentry = NULL;
 	i2400m_release(i2400m);
 	sdio_set_drvdata(func, NULL);
-	sdio_claim_host(func);
-	sdio_disable_func(func);
-	sdio_release_host(func);
 	free_netdev(net_dev);
 	d_fnend(3, dev, "SDIO func %p\n", func);
 }
@@ -512,6 +553,8 @@ const struct sdio_device_id i2400ms_sdio_ids[] = {
 	/* Intel: i2400m WiMAX (iwmc3200) over SDIO */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
 		      SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
+		      SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
 	{ /* end: all zeroes */ },
 };
 MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
@@ -529,6 +572,8 @@ struct sdio_driver i2400m_sdio_driver = {
 static
 int __init i2400ms_driver_init(void)
 {
+	d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
+		       "i2400m_sdio.debug");
 	return sdio_register_driver(&i2400m_sdio_driver);
 }
 module_init(i2400ms_driver_init);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index fa16ccf8e26a..54480e8947f1 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -310,7 +310,7 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
 	size_t tail_room;
 	size_t tx_in;
 
-	if (unlikely(i2400m->tx_in) == 0)
+	if (unlikely(i2400m->tx_in == 0))
 		return I2400M_TX_BUF_SIZE;
 	tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
 	tail_room = I2400M_TX_BUF_SIZE - tx_in;
@@ -642,6 +642,9 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
 	 * current one is out of payload slots or we have a singleton,
 	 * close it and start a new one */
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	result = -ESHUTDOWN;
+	if (i2400m->tx_buf == NULL)
+		goto error_tx_new;
 try_new:
 	if (unlikely(i2400m->tx_msg == NULL))
 		i2400m_tx_new(i2400m);
@@ -697,7 +700,10 @@ try_new:
 	}
 error_tx_new:
 	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
-	i2400m->bus_tx_kick(i2400m);	/* always kick, might free up space */
+	/* kick in most cases, except when the TX subsys is down, as
+	 * it might free space */
+	if (likely(result != -ESHUTDOWN))
+		i2400m->bus_tx_kick(i2400m);
 	d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
 		i2400m, buf, buf_len, pl_type, result);
 	return result;
@@ -740,6 +746,9 @@ struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
 
 	d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	tx_msg_moved = NULL;
+	if (i2400m->tx_buf == NULL)
+		goto out_unlock;
 skip:
 	tx_msg_moved = NULL;
 	if (i2400m->tx_in == i2400m->tx_out) {	/* Empty FIFO? */
@@ -829,6 +838,8 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	if (i2400m->tx_buf == NULL)
+		goto out_unlock;
 	i2400m->tx_out += i2400m->tx_msg_size;
 	d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
 	i2400m->tx_msg_size = 0;
@@ -837,6 +848,7 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
 	n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
 	i2400m->tx_out %= I2400M_TX_BUF_SIZE;
 	i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
+out_unlock:
 	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 }
@@ -876,5 +888,9 @@ int i2400m_tx_setup(struct i2400m *i2400m)
  */
 void i2400m_tx_release(struct i2400m *i2400m)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
 	kfree(i2400m->tx_buf);
+	i2400m->tx_buf = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 }
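The tx.c hunks above close a use-after-release window: i2400m_tx_release() now frees and NULLs tx_buf while holding tx_lock, and every entry point re-checks tx_buf under that same lock before touching the FIFO (bailing out with -ESHUTDOWN where a return code is expected). A stripped-down sketch of the idiom, with a hypothetical entry point:

/* "Free under the lock, re-check before use" -- the shape of the
 * checks the tx.c changes add; example_tx_entry() is hypothetical. */
static int example_tx_entry(struct i2400m *i2400m)
{
	unsigned long flags;
	int result = -ESHUTDOWN;

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (i2400m->tx_buf == NULL)	/* TX subsystem already released? */
		goto out_unlock;
	/* ... safe to touch the TX FIFO while the lock is held ... */
	result = 0;
out_unlock:
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	return result;
}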
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 5ad287c228b8..ce6b9938fde0 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -99,10 +99,10 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
 		dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
 		do_autopm = 0;
 	}
-	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
+	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
 	pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 retry:
-	result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, HZ);
+	result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, 200);
 	switch (result) {
 	case 0:
 		if (len != buf_size) {
@@ -113,6 +113,28 @@ retry:
 		}
 		result = len;
 		break;
+	case -EPIPE:
+		/*
+		 * Stall -- maybe the device is choking on our
+		 * requests. Clear it and give it some time. If stalls
+		 * happen too often, it might be another symptom, so we
+		 * reset.
+		 *
+		 * No error handling for usb_clear_halt(); if it
+		 * works, the retry works; if it fails, this switch
+		 * does the error handling for us.
+		 */
+		if (edc_inc(&i2400mu->urb_edc,
+			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "BM-CMD: too many stalls in "
+				"URB; resetting device\n");
+			usb_queue_reset_device(i2400mu->usb_iface);
+			/* fallthrough */
+		} else {
+			usb_clear_halt(i2400mu->usb_dev, pipe);
+			msleep(10);	/* give the device some time */
+			goto retry;
+		}
 	case -EINVAL:			/* while removing driver */
 	case -ENODEV:			/* dev disconnect ... */
 	case -ENOENT:			/* just ignore it */
@@ -135,7 +157,6 @@ retry:
 			result);
 		goto retry;
 	}
-	result = len;
 	if (do_autopm)
 		usb_autopm_put_interface(i2400mu->usb_iface);
 	return result;
@@ -172,7 +193,8 @@ ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
 	result = -E2BIG;
 	if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
 		goto error_too_big;
-	memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);
+	if (_cmd != i2400m->bm_cmd_buf)
+		memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
 	cmd = i2400m->bm_cmd_buf;
 	if (cmd_size_a > cmd_size)			/* Zero pad space */
 		memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
@@ -226,7 +248,8 @@ int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
 	struct usb_endpoint_descriptor *epd;
 	int pipe;
 
-	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
+	epd = usb_get_epd(i2400mu->usb_iface,
+			  i2400mu->endpoint_cfg.notification);
 	pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 	usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
 			 i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
@@ -328,8 +351,8 @@ error_dev_gone:
 out:
 	if (do_autopm)
 		usb_autopm_put_interface(i2400mu->usb_iface);
-	d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %zd\n",
-		i2400m, ack, ack_size, result);
+	d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
+		i2400m, ack, ack_size, (long) result);
 	return result;
 
 error_exceeded:
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 6add27c3f35c..f88d1c6e35cb 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -51,6 +51,7 @@
  *
  * i2400mu_usb_notification_cb()	Called when a URB is ready
  *   i2400mu_notif_grok()
+ *     i2400m_is_boot_barker()
  *     i2400m_dev_reset_handle()
  *     i2400mu_rx_kick()
  */
@@ -87,32 +88,21 @@ int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
 	d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
 		  i2400mu, buf, buf_len);
 	ret = -EIO;
-	if (buf_len < sizeof(i2400m_NBOOT_BARKER))
+	if (buf_len < sizeof(i2400m_ZERO_BARKER))
 		/* Not a bug, just ignore */
 		goto error_bad_size;
-	if (!memcmp(i2400m_NBOOT_BARKER, buf, sizeof(i2400m_NBOOT_BARKER))
-	    || !memcmp(i2400m_SBOOT_BARKER, buf, sizeof(i2400m_SBOOT_BARKER)))
-		ret = i2400m_dev_reset_handle(i2400m);
-	else if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
+	ret = 0;
+	if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
 		i2400mu_rx_kick(i2400mu);
-		ret = 0;
-	} else {	/* Unknown or unexpected data in the notif message */
-		char prefix[64];
-		ret = -EIO;
-		dev_err(dev, "HW BUG? Unknown/unexpected data in notification "
-			"message (%zu bytes)\n", buf_len);
-		snprintf(prefix, sizeof(prefix), "%s %s: ",
-			 dev_driver_string(dev), dev_name(dev));
-		if (buf_len > 64) {
-			print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
-				       8, 4, buf, 64, 0);
-			printk(KERN_ERR "%s... (only first 64 bytes "
-			       "dumped)\n", prefix);
-		} else
-			print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
-				       8, 4, buf, buf_len, 0);
+		goto out;
 	}
+	ret = i2400m_is_boot_barker(i2400m, buf, buf_len);
+	if (unlikely(ret >= 0))
+		ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
+	else	/* Unknown or unexpected data in the notif message */
+		i2400m_unknown_barker(i2400m, buf, buf_len);
 error_bad_size:
+out:
 	d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
 		i2400mu, buf, buf_len, ret);
 	return ret;
@@ -220,7 +210,8 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
 		dev_err(dev, "notification: cannot allocate URB\n");
 		goto error_alloc_urb;
 	}
-	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
+	epd = usb_get_epd(i2400mu->usb_iface,
+			  i2400mu->endpoint_cfg.notification);
 	usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 	usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
 			 buf, I2400MU_MAX_NOTIFICATION_LEN,
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index a314799967cf..ba1b02362dfc 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -204,7 +204,7 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
 		dev_err(dev, "RX: can't get autopm: %d\n", result);
 		do_autopm = 0;
 	}
-	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_IN);
+	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
 	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 retry:
 	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
@@ -214,7 +214,7 @@ retry:
 	}
 	result = usb_bulk_msg(
 		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
-		rx_size, &read_size, HZ);
+		rx_size, &read_size, 200);
 	usb_mark_last_busy(i2400mu->usb_dev);
 	switch (result) {
 	case 0:
@@ -222,6 +222,26 @@ retry:
 			goto retry;	/* ZLP, just resubmit */
 		skb_put(rx_skb, read_size);
 		break;
+	case -EPIPE:
+		/*
+		 * Stall -- maybe the device is choking on our
+		 * requests. Clear it and give it some time. If stalls
+		 * happen too often, it might be another symptom, so we
+		 * reset.
+		 *
+		 * No error handling for usb_clear_halt(); if it
+		 * works, the retry works; if it fails, this switch
+		 * does the error handling for us.
+		 */
+		if (edc_inc(&i2400mu->urb_edc,
+			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "BM-CMD: too many stalls in "
+				"URB; resetting device\n");
+			goto do_reset;
+		}
+		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
+		msleep(10);	/* give the device some time */
+		goto retry;
 	case -EINVAL:			/* while removing driver */
 	case -ENODEV:			/* dev disconnect ... */
 	case -ENOENT:			/* just ignore it */
@@ -283,6 +303,7 @@ out:
 error_reset:
 	dev_err(dev, "RX: maximum errors in URB exceeded; "
 		"resetting device\n");
+do_reset:
 	usb_queue_reset_device(i2400mu->usb_iface);
 	rx_skb = ERR_PTR(result);
 	goto out;
@@ -316,10 +337,15 @@ int i2400mu_rxd(void *_i2400mu)
 	size_t pending;
 	int rx_size;
 	struct sk_buff *rx_skb;
+	unsigned long flags;
 
 	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	BUG_ON(i2400mu->rx_kthread != NULL);
+	i2400mu->rx_kthread = current;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	while (1) {
-		d_printf(2, dev, "TX: waiting for messages\n");
+		d_printf(2, dev, "RX: waiting for messages\n");
 		pending = 0;
 		wait_event_interruptible(
 			i2400mu->rx_wq,
@@ -367,6 +393,9 @@ int i2400mu_rxd(void *_i2400mu)
 	}
 	result = 0;
 out:
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	i2400mu->rx_kthread = NULL;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
 	return result;
 
@@ -403,18 +432,33 @@ int i2400mu_rx_setup(struct i2400mu *i2400mu)
 	struct i2400m *i2400m = &i2400mu->i2400m;
 	struct device *dev = &i2400mu->usb_iface->dev;
 	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	struct task_struct *kthread;
 
-	i2400mu->rx_kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
-					  wimax_dev->name);
-	if (IS_ERR(i2400mu->rx_kthread)) {
-		result = PTR_ERR(i2400mu->rx_kthread);
+	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
+			      wimax_dev->name);
+	/* the kthread function sets i2400mu->rx_kthread */
+	if (IS_ERR(kthread)) {
+		result = PTR_ERR(kthread);
 		dev_err(dev, "RX: cannot start thread: %d\n", result);
 	}
 	return result;
 }
 
+
 void i2400mu_rx_release(struct i2400mu *i2400mu)
 {
-	kthread_stop(i2400mu->rx_kthread);
+	unsigned long flags;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = i2400m_dev(i2400m);
+	struct task_struct *kthread;
+
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	kthread = i2400mu->rx_kthread;
+	i2400mu->rx_kthread = NULL;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	if (kthread)
+		kthread_stop(kthread);
+	else
+		d_printf(1, dev, "RX: kthread had already exited\n");
 }
 
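The -EPIPE handling added to the RX path above clears the endpoint stall with usb_clear_halt(), sleeps briefly, and retries the bulk transfer; repeated stalls trip the driver's error-density counter (edc_inc()) and queue a device reset instead. A minimal sketch of that recovery loop, using a plain retry budget in place of the edc helpers:

#include <linux/usb.h>
#include <linux/delay.h>

/*
 * Sketch only: retry a bulk read across endpoint stalls.  The "10
 * stalls" budget stands in for the driver's edc_inc() accounting; a
 * real caller would queue a device reset on -EIO.
 */
static int bulk_read_clearing_stalls(struct usb_device *udev,
				     unsigned int pipe, void *buf,
				     int len, int *read)
{
	int result, stalls = 0;

retry:
	result = usb_bulk_msg(udev, pipe, buf, len, read, 200);	/* 200 ms */
	if (result == -EPIPE) {
		if (++stalls > 10)		/* persistent: give up */
			return -EIO;
		usb_clear_halt(udev, pipe);	/* clear the stall */
		msleep(10);			/* give the device some time */
		goto retry;
	}
	return result;
}
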
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index dfd893356f49..c65b9979f87e 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -101,11 +101,11 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
 		dev_err(dev, "TX: can't get autopm: %d\n", result);
 		do_autopm = 0;
 	}
-	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
+	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
 	usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 retry:
 	result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
-			      tx_msg, tx_msg_size, &sent_size, HZ);
+			      tx_msg, tx_msg_size, &sent_size, 200);
 	usb_mark_last_busy(i2400mu->usb_dev);
 	switch (result) {
 	case 0:
@@ -115,6 +115,28 @@ retry:
 			result = -EIO;
 		}
 		break;
+	case -EPIPE:
+		/*
+		 * Stall -- maybe the device is choking with our
+		 * requests. Clear it and give it some time. If they
+		 * happen too often, it might be another symptom, so we
+		 * reset.
+		 *
+		 * No error handling for usb_clear_halt(); if it
+		 * works, the retry works; if it fails, this switch
+		 * does the error handling for us.
+		 */
+		if (edc_inc(&i2400mu->urb_edc,
+			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "TX: too many stalls in "
+				"URB; resetting device\n");
+			usb_queue_reset_device(i2400mu->usb_iface);
+			/* fallthrough */
+		} else {
+			usb_clear_halt(i2400mu->usb_dev, usb_pipe);
+			msleep(10);	/* give the device some time */
+			goto retry;
+		}
 	case -EINVAL:			/* while removing driver */
 	case -ENODEV:			/* dev disconnect ... */
 	case -ENOENT:			/* just ignore it */
@@ -161,9 +183,15 @@ int i2400mu_txd(void *_i2400mu)
 	struct device *dev = &i2400mu->usb_iface->dev;
 	struct i2400m_msg_hdr *tx_msg;
 	size_t tx_msg_size;
+	unsigned long flags;
 
 	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
 
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	BUG_ON(i2400mu->tx_kthread != NULL);
+	i2400mu->tx_kthread = current;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
 	while (1) {
 		d_printf(2, dev, "TX: waiting for messages\n");
 		tx_msg = NULL;
@@ -183,6 +211,11 @@ int i2400mu_txd(void *_i2400mu)
 		if (result < 0)
 			break;
 	}
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400mu->tx_kthread = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
 	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
 	return result;
 }
@@ -213,11 +246,13 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
 	struct i2400m *i2400m = &i2400mu->i2400m;
 	struct device *dev = &i2400mu->usb_iface->dev;
 	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	struct task_struct *kthread;
 
-	i2400mu->tx_kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
-					  wimax_dev->name);
-	if (IS_ERR(i2400mu->tx_kthread)) {
-		result = PTR_ERR(i2400mu->tx_kthread);
+	kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
+			      wimax_dev->name);
+	/* the kthread function sets i2400mu->tx_kthread */
+	if (IS_ERR(kthread)) {
+		result = PTR_ERR(kthread);
 		dev_err(dev, "TX: cannot start thread: %d\n", result);
 	}
 	return result;
@@ -225,5 +260,17 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
 
 void i2400mu_tx_release(struct i2400mu *i2400mu)
 {
-	kthread_stop(i2400mu->tx_kthread);
+	unsigned long flags;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = i2400m_dev(i2400m);
+	struct task_struct *kthread;
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	kthread = i2400mu->tx_kthread;
+	i2400mu->tx_kthread = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	if (kthread)
+		kthread_stop(kthread);
+	else
+		d_printf(1, dev, "TX: kthread had already exited\n");
 }
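
Both files above also change how the RX/TX kthreads are tracked: instead of trusting the kthread_run() return value, each thread records itself in ->rx_kthread/->tx_kthread under the corresponding rx_lock/tx_lock, and the release path only calls kthread_stop() if that pointer is still set, so a thread that already exited on its own is not stopped after the fact. A minimal sketch of the pattern, with a hypothetical struct worker standing in for the i2400mu state:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/err.h>

struct worker {
	spinlock_t lock;
	struct task_struct *kthread;	/* NULL once the thread is gone */
};

static int worker_fn(void *data)
{
	struct worker *w = data;
	unsigned long flags;

	spin_lock_irqsave(&w->lock, flags);
	w->kthread = current;			/* register ourselves */
	spin_unlock_irqrestore(&w->lock, flags);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* real work goes here */

	spin_lock_irqsave(&w->lock, flags);
	w->kthread = NULL;			/* we are on the way out */
	spin_unlock_irqrestore(&w->lock, flags);
	return 0;
}

static int worker_setup(struct worker *w)
{
	struct task_struct *task;

	spin_lock_init(&w->lock);
	task = kthread_run(worker_fn, w, "worker");	/* worker_fn() records itself */
	return IS_ERR(task) ? PTR_ERR(task) : 0;
}

static void worker_release(struct worker *w)
{
	struct task_struct *kthread;
	unsigned long flags;

	spin_lock_irqsave(&w->lock, flags);
	kthread = w->kthread;
	w->kthread = NULL;
	spin_unlock_irqrestore(&w->lock, flags);
	if (kthread)
		kthread_stop(kthread);	/* still running: wait for it to exit */
	/* else: it had already exited on its own */
}
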
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 7eadd11c815b..47e84ef355c5 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -58,7 +58,7 @@
  *   i2400mu_rx_release()
  *   i2400mu_tx_release()
  *
- * i2400mu_bus_reset()            Called by i2400m->bus_reset
+ * i2400mu_bus_reset()            Called by i2400m_reset
  *   __i2400mu_reset()
  *     __i2400mu_send_barker()
  *   usb_reset_device()
@@ -71,13 +71,25 @@
 #define D_SUBMODULE usb
 #include "usb-debug-levels.h"
 
+static char i2400mu_debug_params[128];
+module_param_string(debug, i2400mu_debug_params, sizeof(i2400mu_debug_params),
+		    0644);
+MODULE_PARM_DESC(debug,
+		 "String of space-separated NAME:VALUE pairs, where NAMEs "
+		 "are the different debug submodules and VALUEs are the "
+		 "initial debug values to set.");
 
 /* Our firmware file name */
-static const char *i2400mu_bus_fw_names[] = {
+static const char *i2400mu_bus_fw_names_5x50[] = {
 #define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
 	I2400MU_FW_FILE_NAME_v1_4,
-#define I2400MU_FW_FILE_NAME_v1_3 "i2400m-fw-usb-1.3.sbcf"
-	I2400MU_FW_FILE_NAME_v1_3,
+	NULL,
+};
+
+
+static const char *i2400mu_bus_fw_names_6050[] = {
+#define I6050U_FW_FILE_NAME_v1_5 "i6050-fw-usb-1.5.sbcf"
+	I6050U_FW_FILE_NAME_v1_5,
 	NULL,
 };
 
@@ -160,14 +172,59 @@ int __i2400mu_send_barker(struct i2400mu *i2400mu,
 	epd = usb_get_epd(i2400mu->usb_iface, endpoint);
 	pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 	memcpy(buffer, barker, barker_size);
+retry:
 	ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
-			   &actual_len, HZ);
-	if (ret < 0) {
-		if (ret != -EINVAL)
-			dev_err(dev, "E: barker error: %d\n", ret);
-	} else if (actual_len != barker_size) {
-		dev_err(dev, "E: only %d bytes transmitted\n", actual_len);
-		ret = -EIO;
+			   &actual_len, 200);
+	switch (ret) {
+	case 0:
+		if (actual_len != barker_size) {	/* Too short? drop it */
+			dev_err(dev, "E: %s: short write (%d B vs %zu "
+				"expected)\n",
+				__func__, actual_len, barker_size);
+			ret = -EIO;
+		}
+		break;
+	case -EPIPE:
+		/*
+		 * Stall -- maybe the device is choking with our
+		 * requests. Clear it and give it some time. If they
+		 * happen too often, it might be another symptom, so we
+		 * reset.
+		 *
+		 * No error handling for usb_clear_halt(); if it
+		 * works, the retry works; if it fails, this switch
+		 * does the error handling for us.
+		 */
+		if (edc_inc(&i2400mu->urb_edc,
+			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "E: %s: too many stalls in "
+				"URB; resetting device\n", __func__);
+			usb_queue_reset_device(i2400mu->usb_iface);
+			/* fallthrough */
+		} else {
+			usb_clear_halt(i2400mu->usb_dev, pipe);
+			msleep(10);	/* give the device some time */
+			goto retry;
+		}
+	case -EINVAL:			/* while removing driver */
+	case -ENODEV:			/* dev disconnect ... */
+	case -ENOENT:			/* just ignore it */
+	case -ESHUTDOWN:		/* and exit */
+	case -ECONNRESET:
+		ret = -ESHUTDOWN;
+		break;
+	default:			/* Some error? */
+		if (edc_inc(&i2400mu->urb_edc,
+			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "E: %s: maximum errors in URB "
+				"exceeded; resetting device\n",
+				__func__);
+			usb_queue_reset_device(i2400mu->usb_iface);
+		} else {
+			dev_warn(dev, "W: %s: cannot send URB: %d\n",
+				 __func__, ret);
+			goto retry;
+		}
 	}
 	kfree(buffer);
 error_kzalloc:
@@ -232,15 +289,16 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
 
 	d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
 	if (rt == I2400M_RT_WARM)
-		result = __i2400mu_send_barker(i2400mu, i2400m_WARM_BOOT_BARKER,
-					       sizeof(i2400m_WARM_BOOT_BARKER),
-					       I2400MU_EP_BULK_OUT);
+		result = __i2400mu_send_barker(
+			i2400mu, i2400m_WARM_BOOT_BARKER,
+			sizeof(i2400m_WARM_BOOT_BARKER),
+			i2400mu->endpoint_cfg.bulk_out);
 	else if (rt == I2400M_RT_COLD)
-		result = __i2400mu_send_barker(i2400mu, i2400m_COLD_BOOT_BARKER,
-					       sizeof(i2400m_COLD_BOOT_BARKER),
-					       I2400MU_EP_RESET_COLD);
+		result = __i2400mu_send_barker(
+			i2400mu, i2400m_COLD_BOOT_BARKER,
+			sizeof(i2400m_COLD_BOOT_BARKER),
+			i2400mu->endpoint_cfg.reset_cold);
 	else if (rt == I2400M_RT_BUS) {
-do_bus_reset:
 		result = usb_reset_device(i2400mu->usb_dev);
 		switch (result) {
 		case 0:
@@ -248,7 +306,7 @@ do_bus_reset:
 		case -ENODEV:
 		case -ENOENT:
 		case -ESHUTDOWN:
-			result = rt == I2400M_RT_WARM ? -ENODEV : 0;
+			result = 0;
 			break;	/* We assume the device is disconnected */
 		default:
 			dev_err(dev, "USB reset failed (%d), giving up!\n",
@@ -261,10 +319,17 @@ do_bus_reset:
 	if (result < 0
 	    && result != -EINVAL	/* device is gone */
 	    && rt != I2400M_RT_BUS) {
+		/*
+		 * Things failed -- resort to a lower level reset, which
+		 * we queue in another context; the reason for this is
+		 * that the pre and post reset functionality requires
+		 * the i2400m->init_mutex; RT_WARM and RT_COLD can
+		 * come from areas where i2400m->init_mutex is taken.
+		 */
 		dev_err(dev, "%s reset failed (%d); trying USB reset\n",
 			rt == I2400M_RT_WARM ? "warm" : "cold", result);
-		rt = I2400M_RT_BUS;
-		goto do_bus_reset;
+		usb_queue_reset_device(i2400mu->usb_iface);
+		result = -ENODEV;
 	}
 	d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
 	return result;
@@ -402,20 +467,33 @@ int i2400mu_probe(struct usb_interface *iface,
 
 	i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
 	i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
+	i2400m->bus_setup = NULL;
 	i2400m->bus_dev_start = i2400mu_bus_dev_start;
 	i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
+	i2400m->bus_release = NULL;
 	i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
 	i2400m->bus_reset = i2400mu_bus_reset;
-	i2400m->bus_bm_retries = I2400M_BOOT_RETRIES;
+	i2400m->bus_bm_retries = I2400M_USB_BOOT_RETRIES;
 	i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
 	i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
-	i2400m->bus_fw_names = i2400mu_bus_fw_names;
 	i2400m->bus_bm_mac_addr_impaired = 0;
 
+	if (id->idProduct == USB_DEVICE_ID_I6050) {
+		i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
+		i2400mu->endpoint_cfg.bulk_out = 0;
+		i2400mu->endpoint_cfg.notification = 3;
+		i2400mu->endpoint_cfg.reset_cold = 2;
+		i2400mu->endpoint_cfg.bulk_in = 1;
+	} else {
+		i2400m->bus_fw_names = i2400mu_bus_fw_names_5x50;
+		i2400mu->endpoint_cfg.bulk_out = 0;
+		i2400mu->endpoint_cfg.notification = 1;
+		i2400mu->endpoint_cfg.reset_cold = 2;
+		i2400mu->endpoint_cfg.bulk_in = 3;
+	}
 #ifdef CONFIG_PM
 	iface->needs_remote_wakeup = 1;		/* autosuspend (15s delay) */
 	device_init_wakeup(dev, 1);
-	usb_autopm_enable(i2400mu->usb_iface);
 	usb_dev->autosuspend_delay = 15 * HZ;
 	usb_dev->autosuspend_disabled = 0;
 #endif
@@ -483,7 +561,10 @@ void i2400mu_disconnect(struct usb_interface *iface)
  * So at the end, the three cases require common handling.
  *
  * If at the time of this call the device's firmware is not loaded,
- * nothing has to be done.
+ * nothing has to be done. Note we can be "loose" about not reading
+ * i2400m->updown under i2400m->init_mutex. If it happens to change
+ * immediately, other parts of the call flow will fail and effectively
+ * catch it.
  *
  * If the firmware is loaded, we need to:
  *
@@ -522,6 +603,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
 #endif
 
 	d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
+	rmb();		/* see i2400m->updown's documentation  */
 	if (i2400m->updown == 0)
 		goto no_firmware;
 	if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
@@ -575,6 +657,7 @@ int i2400mu_resume(struct usb_interface *iface)
 	struct i2400m *i2400m = &i2400mu->i2400m;
 
 	d_fnstart(3, dev, "(iface %p)\n", iface);
+	rmb();		/* see i2400m->updown's documentation  */
 	if (i2400m->updown == 0) {
 		d_printf(1, dev, "fw was down, no resume needed\n");
 		goto out;
@@ -591,7 +674,54 @@ out:
 
 
 static
+int i2400mu_reset_resume(struct usb_interface *iface)
+{
+	int result;
+	struct device *dev = &iface->dev;
+	struct i2400mu *i2400mu = usb_get_intfdata(iface);
+	struct i2400m *i2400m = &i2400mu->i2400m;
+
+	d_fnstart(3, dev, "(iface %p)\n", iface);
+	result = i2400m_dev_reset_handle(i2400m, "device reset on resume");
+	d_fnend(3, dev, "(iface %p) = %d\n", iface, result);
+	return result < 0 ? result : 0;
+}
+
+
+/*
+ * Another driver or user space is triggering a reset on the device
+ * which contains the interface passed as an argument. Cease IO and
+ * save any device state you need to restore.
+ *
+ * If you need to allocate memory here, use GFP_NOIO or, if you are in
+ * atomic context, GFP_ATOMIC.
+ */
+static
+int i2400mu_pre_reset(struct usb_interface *iface)
+{
+	struct i2400mu *i2400mu = usb_get_intfdata(iface);
+	return i2400m_pre_reset(&i2400mu->i2400m);
+}
+
+
+/*
+ * The reset has completed.  Restore any saved device state and begin
+ * using the device again.
+ *
+ * If you need to allocate memory here, use GFP_NOIO or, if you are in
+ * atomic context, GFP_ATOMIC.
+ */
+static
+int i2400mu_post_reset(struct usb_interface *iface)
+{
+	struct i2400mu *i2400mu = usb_get_intfdata(iface);
+	return i2400m_post_reset(&i2400mu->i2400m);
+}
+
+
+static
 struct usb_device_id i2400mu_id_table[] = {
+	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
 	{ USB_DEVICE(0x8086, 0x0181) },
 	{ USB_DEVICE(0x8086, 0x1403) },
 	{ USB_DEVICE(0x8086, 0x1405) },
@@ -609,8 +739,11 @@ struct usb_driver i2400mu_driver = {
 	.name = KBUILD_MODNAME,
 	.suspend = i2400mu_suspend,
 	.resume = i2400mu_resume,
+	.reset_resume = i2400mu_reset_resume,
 	.probe = i2400mu_probe,
 	.disconnect = i2400mu_disconnect,
+	.pre_reset = i2400mu_pre_reset,
+	.post_reset = i2400mu_post_reset,
 	.id_table = i2400mu_id_table,
 	.supports_autosuspend = 1,
 };
@@ -618,6 +751,8 @@ struct usb_driver i2400mu_driver = {
 static
 int __init i2400mu_driver_init(void)
 {
+	d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400mu_debug_params,
+		       "i2400m_usb.debug");
 	return usb_register(&i2400mu_driver);
 }
 module_init(i2400mu_driver_init);
@@ -632,7 +767,7 @@ void __exit i2400mu_driver_exit(void)
 module_exit(i2400mu_driver_exit);
 
 MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
-MODULE_DESCRIPTION("Intel 2400M WiMAX networking for USB");
+MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
+		   "(5x50 & 6050)");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
-MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_3);
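
Beyond the endpoint/firmware table split for the 6050 part, the usb.c changes hook the driver into the USB core's reset machinery: pre_reset()/post_reset() bracket externally triggered resets (quiescing and restarting I/O around the resets queued elsewhere in the driver), and reset_resume() handles a device that came back from suspend with its state lost. A minimal sketch of how such hooks are wired into a usb_driver; the hook bodies here are placeholders, not the i2400m logic:

#include <linux/module.h>
#include <linux/usb.h>

static int sketch_pre_reset(struct usb_interface *iface)
{
	/* quiesce I/O and save whatever state the reset will destroy */
	return 0;
}

static int sketch_post_reset(struct usb_interface *iface)
{
	/* restore the saved state and restart I/O */
	return 0;
}

static int sketch_reset_resume(struct usb_interface *iface)
{
	/* the device was reset while suspended: reinitialize from scratch */
	return 0;
}

static struct usb_driver sketch_driver = {
	.name		= "sketch",
	.pre_reset	= sketch_pre_reset,
	.post_reset	= sketch_post_reset,
	.reset_resume	= sketch_reset_resume,
	/* .probe, .disconnect, .id_table, .suspend, .resume omitted */
};
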
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index d7a764a2fc1a..56dd6650c97a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
 menuconfig WLAN
 	bool "Wireless LAN"
 	depends on !S390
+	select WIRELESS
 	default y
 	---help---
 	  This section contains all the pre 802.11 and 802.11 wireless
@@ -15,114 +16,12 @@ menuconfig WLAN
 
 if WLAN
 
-menuconfig WLAN_PRE80211
-	bool "Wireless LAN (pre-802.11)"
-	depends on NETDEVICES
-	---help---
-	  Say Y if you have any pre-802.11 wireless LAN hardware.
-
-	  This option does not affect the kernel build, it only
-	  lets you choose drivers.
-
-config STRIP
-	tristate "STRIP (Metricom starmode radio IP)"
-	depends on INET && WLAN_PRE80211
-	select WIRELESS_EXT
-	---help---
-	  Say Y if you have a Metricom radio and intend to use Starmode Radio
-	  IP. STRIP is a radio protocol developed for the MosquitoNet project
-	  to send Internet traffic using Metricom radios.  Metricom radios are
-	  small, battery powered, 100kbit/sec packet radio transceivers, about
-	  the size and weight of a cellular telephone. (You may also have heard
-	  them called "Metricom modems" but we avoid the term "modem" because
-	  it misleads many people into thinking that you can plug a Metricom
-	  modem into a phone line and use it as a modem.)
-
-	  You can use STRIP on any Linux machine with a serial port, although
-	  it is obviously most useful for people with laptop computers. If you
-	  think you might get a Metricom radio in the future, there is no harm
-	  in saying Y to STRIP now, except that it makes the kernel a bit
-	  bigger.
-
-	  To compile this as a module, choose M here: the module will be
-	  called strip.
-
-config ARLAN
-	tristate "Aironet Arlan 655 & IC2200 DS support"
-	depends on ISA && !64BIT && WLAN_PRE80211
-	select WIRELESS_EXT
-	---help---
-	  Aironet makes Arlan, a class of wireless LAN adapters. These use the
-	  www.Telxon.com chip, which is also used on several similar cards.
-	  This driver is tested on the 655 and IC2200 series cards. Look at
-	  <http://www.ylenurme.ee/~elmer/655/> for the latest information.
-
-	  The driver is built as two modules, arlan and arlan-proc. The latter
-	  is the /proc interface and is not needed most of time.
-
-	  On some computers the card ends up in non-valid state after some
-	  time. Use a ping-reset script to clear it.
-
-config WAVELAN
-	tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
-	depends on ISA && WLAN_PRE80211
-	select WIRELESS_EXT
-	---help---
-	  The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
-	  a Radio LAN (wireless Ethernet-like Local Area Network) using the
-	  radio frequencies 900 MHz and 2.4 GHz.
-
-	  If you want to use an ISA WaveLAN card under Linux, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>. Some more specific
-	  information is contained in
-	  <file:Documentation/networking/wavelan.txt> and in the source code
-	  <file:drivers/net/wireless/wavelan.p.h>.
-
-	  You will also need the wireless tools package available from
-	  <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
-	  Please read the man pages contained therein.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called wavelan.
-
-config PCMCIA_WAVELAN
-	tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
-	depends on PCMCIA && WLAN_PRE80211
-	select WIRELESS_EXT
-	help
-	  Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
-	  (PC-card) wireless Ethernet networking card to your computer.  This
-	  driver is for the non-IEEE-802.11 Wavelan cards.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called wavelan_cs.  If unsure, say N.
-
-config PCMCIA_NETWAVE
-	tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
-	depends on PCMCIA && WLAN_PRE80211
-	select WIRELESS_EXT
-	help
-	  Say Y here if you intend to attach this type of PCMCIA (PC-card)
-	  wireless Ethernet networking card to your computer.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called netwave_cs.  If unsure, say N.
-
-
-menuconfig WLAN_80211
-	bool "Wireless LAN (IEEE 802.11)"
-	depends on NETDEVICES
-	---help---
-	  Say Y if you have any 802.11 wireless LAN hardware.
-
-	  This option does not affect the kernel build, it only
-	  lets you choose drivers.
-
 config PCMCIA_RAYCS
 	tristate "Aviator/Raytheon 2.4GHz wireless support"
-	depends on PCMCIA && WLAN_80211
+	depends on PCMCIA
 	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
 	---help---
 	  Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
 	  (PC-card) wireless Ethernet networking card to your computer.
@@ -132,49 +31,9 @@ config PCMCIA_RAYCS
 	  To compile this driver as a module, choose M here: the module will be
 	  called ray_cs.  If unsure, say N.
 
-config LIBERTAS
-	tristate "Marvell 8xxx Libertas WLAN driver support"
-	depends on WLAN_80211
-	select WIRELESS_EXT
-	select LIB80211
-	select FW_LOADER
-	---help---
-	  A library for Marvell Libertas 8xxx devices.
-
-config LIBERTAS_USB
-	tristate "Marvell Libertas 8388 USB 802.11b/g cards"
-	depends on LIBERTAS && USB
-	---help---
-	  A driver for Marvell Libertas 8388 USB devices.
-
-config LIBERTAS_CS
-	tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
-	depends on LIBERTAS && PCMCIA
-	select FW_LOADER
-	---help---
-	  A driver for Marvell Libertas 8385 CompactFlash devices.
-
-config LIBERTAS_SDIO
-	tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
-	depends on LIBERTAS && MMC
-	---help---
-	  A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
-
-config LIBERTAS_SPI
-	tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
-	depends on LIBERTAS && SPI
-	---help---
-	  A driver for Marvell Libertas 8686 SPI devices.
-
-config LIBERTAS_DEBUG
-	bool "Enable full debugging output in the Libertas module."
-	depends on LIBERTAS
-	---help---
-	  Debugging support.
-
 config LIBERTAS_THINFIRM
 	tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
-	depends on WLAN_80211 && MAC80211
+	depends on MAC80211
 	select FW_LOADER
 	---help---
 	  A library for Marvell Libertas 8xxx devices using thinfirm.
@@ -187,9 +46,11 @@ config LIBERTAS_THINFIRM_USB
 
 config AIRO
 	tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
-	depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
+	depends on ISA_DMA_API && (PCI || BROKEN)
 	select WIRELESS_EXT
 	select CRYPTO
+	select WEXT_SPY
+	select WEXT_PRIV
 	---help---
 	  This is the standard Linux driver to support Cisco/Aironet ISA and
 	  PCI 802.11 wireless cards.
@@ -205,8 +66,9 @@ config AIRO
 
 config ATMEL
       tristate "Atmel at76c50x chipset  802.11b support"
-      depends on (PCI || PCMCIA) && WLAN_80211
+      depends on (PCI || PCMCIA)
       select WIRELESS_EXT
+      select WEXT_PRIV
       select FW_LOADER
       select CRC32
        ---help---
@@ -239,7 +101,7 @@ config PCMCIA_ATMEL
 
 config AT76C50X_USB
         tristate "Atmel at76c503/at76c505/at76c505a USB cards"
-        depends on MAC80211 && WLAN_80211 && USB
+        depends on MAC80211 && USB
         select FW_LOADER
         ---help---
           Enable support for USB Wireless devices using Atmel at76c503,
@@ -247,8 +109,9 @@ config AT76C50X_USB
 
 config AIRO_CS
 	tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
-	depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211
+	depends on PCMCIA && (BROKEN || !M32R)
 	select WIRELESS_EXT
+	select WEXT_SPY
 	select CRYPTO
 	select CRYPTO_AES
 	---help---
@@ -266,18 +129,21 @@ config AIRO_CS
 	  Cisco Linux utilities can be used to configure the card.
 
 config PCMCIA_WL3501
-      tristate "Planet WL3501 PCMCIA cards"
-      depends on EXPERIMENTAL && PCMCIA && WLAN_80211
-      select WIRELESS_EXT
-       ---help---
-         A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
-	 It has basic support for Linux wireless extensions and initial
-	 micro support for ethtool.
+	tristate "Planet WL3501 PCMCIA cards"
+	depends on EXPERIMENTAL && PCMCIA
+	select WIRELESS_EXT
+	select WEXT_SPY
+	help
+	  A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
+	  It has basic support for Linux wireless extensions and initial
+	  micro support for ethtool.
 
 config PRISM54
 	tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
-	depends on PCI && EXPERIMENTAL && WLAN_80211
+	depends on PCI && EXPERIMENTAL
 	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
 	select FW_LOADER
 	---help---
 	  This enables support for FullMAC PCI/Cardbus prism54 devices. This
@@ -298,8 +164,9 @@ config PRISM54
 
 config USB_ZD1201
 	tristate "USB ZD1201 based Wireless device support"
-	depends on USB && WLAN_80211
+	depends on USB
 	select WIRELESS_EXT
+	select WEXT_PRIV
 	select FW_LOADER
 	---help---
 	  Say Y if you want to use wireless LAN adapters based on the ZyDAS
@@ -316,7 +183,7 @@ config USB_ZD1201
 
 config USB_NET_RNDIS_WLAN
 	tristate "Wireless RNDIS USB support"
-	depends on USB && WLAN_80211 && EXPERIMENTAL
+	depends on USB && EXPERIMENTAL
 	depends on CFG80211
 	select USB_USBNET
 	select USB_NET_CDCETHER
@@ -344,7 +211,7 @@ config USB_NET_RNDIS_WLAN
 
 config RTL8180
 	tristate "Realtek 8180/8185 PCI support"
-	depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
+	depends on MAC80211 && PCI && EXPERIMENTAL
 	select EEPROM_93CX6
 	---help---
 	  This is a driver for RTL8180 and RTL8185 based cards.
@@ -400,7 +267,7 @@ config RTL8180
 
 config RTL8187
 	tristate "Realtek 8187 and 8187B USB support"
-	depends on MAC80211 && USB && WLAN_80211
+	depends on MAC80211 && USB
 	select EEPROM_93CX6
 	---help---
 	  This is a driver for RTL8187 and RTL8187B based cards.
@@ -429,7 +296,7 @@ config RTL8187_LEDS
 
 config ADM8211
 	tristate "ADMtek ADM8211 support"
-	depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
+	depends on MAC80211 && PCI && EXPERIMENTAL
 	select CRC32
 	select EEPROM_93CX6
 	---help---
@@ -456,7 +323,7 @@ config ADM8211
 
 config MAC80211_HWSIM
 	tristate "Simulated radio testing tool for mac80211"
-	depends on MAC80211 && WLAN_80211
+	depends on MAC80211
 	---help---
 	  This driver is a developer testing tool that can be used to test
 	  IEEE 802.11 networking stack (mac80211) functionality. This is not
@@ -469,24 +336,25 @@ config MAC80211_HWSIM
 
 config MWL8K
 	tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
-	depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
+	depends on MAC80211 && PCI && EXPERIMENTAL
 	---help---
 	  This driver supports Marvell TOPDOG 802.11 wireless cards.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called mwl8k.  If unsure, say N.
 
-source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/ath/Kconfig"
-source "drivers/net/wireless/ipw2x00/Kconfig"
-source "drivers/net/wireless/iwlwifi/Kconfig"
-source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
-source "drivers/net/wireless/zd1211rw/Kconfig"
-source "drivers/net/wireless/rt2x00/Kconfig"
+source "drivers/net/wireless/hostap/Kconfig"
+source "drivers/net/wireless/ipw2x00/Kconfig"
+source "drivers/net/wireless/iwlwifi/Kconfig"
+source "drivers/net/wireless/iwmc3200wifi/Kconfig"
+source "drivers/net/wireless/libertas/Kconfig"
 source "drivers/net/wireless/orinoco/Kconfig"
+source "drivers/net/wireless/p54/Kconfig"
+source "drivers/net/wireless/rt2x00/Kconfig"
 source "drivers/net/wireless/wl12xx/Kconfig"
-source "drivers/net/wireless/iwmc3200wifi/Kconfig"
+source "drivers/net/wireless/zd1211rw/Kconfig"
 
 endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7a4647e78fd3..5d4ce4d2b32b 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -5,16 +5,6 @@
 obj-$(CONFIG_IPW2100) += ipw2x00/
 obj-$(CONFIG_IPW2200) += ipw2x00/
 
-obj-$(CONFIG_STRIP) += strip.o
-obj-$(CONFIG_ARLAN) += arlan.o 
-
-arlan-objs := arlan-main.o arlan-proc.o
-
-# Obsolete cards
-obj-$(CONFIG_WAVELAN)		+= wavelan.o
-obj-$(CONFIG_PCMCIA_NETWAVE)	+= netwave_cs.o
-obj-$(CONFIG_PCMCIA_WAVELAN)	+= wavelan_cs.o
-
 obj-$(CONFIG_HERMES)		+= orinoco/
 
 obj-$(CONFIG_AIRO)		+= airo.o
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index b80f514877d8..39410016b4ff 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1538,7 +1538,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
 	adm8211_hw_init(dev);
 	adm8211_rf_set_channel(dev, priv->channel);
 
-	retval = request_irq(priv->pdev->irq, &adm8211_interrupt,
+	retval = request_irq(priv->pdev->irq, adm8211_interrupt,
 			     IRQF_SHARED, "adm8211", dev);
 	if (retval) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index abf896a7390e..4331d675fcc6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4806,7 +4806,7 @@ static int airo_config_commit(struct net_device *dev,
 
 static inline int sniffing_mode(struct airo_info *ai)
 {
-	return le16_to_cpu(ai->config.rmode & RXMODE_MASK) >=
+	return (le16_to_cpu(ai->config.rmode) & le16_to_cpu(RXMODE_MASK)) >=
 		le16_to_cpu(RXMODE_RFMON);
 }
 
@@ -5659,7 +5659,8 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
 	pci_save_state(pdev);
-	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
 }
 
 static int airo_pci_resume(struct pci_dev *pdev)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8e1a55dec351..2517364d3ebe 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -121,6 +121,14 @@ static struct fwentry firmwares[] = {
 	[BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" },
 	[BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" },
 };
+MODULE_FIRMWARE("atmel_at76c503-i3861.bin");
+MODULE_FIRMWARE("atmel_at76c503-i3863.bin");
+MODULE_FIRMWARE("atmel_at76c503-rfmd.bin");
+MODULE_FIRMWARE("atmel_at76c503-rfmd-acc.bin");
+MODULE_FIRMWARE("atmel_at76c505-rfmd.bin");
+MODULE_FIRMWARE("atmel_at76c505-rfmd2958.bin");
+MODULE_FIRMWARE("atmel_at76c505a-rfmd2958.bin");
+MODULE_FIRMWARE("atmel_at76c505amx-rfmd.bin");
 
 #define USB_DEVICE_DATA(__ops)	.driver_info = (kernel_ulong_t)(__ops)
 
@@ -524,20 +532,6 @@ static char *hex2str(void *buf, int len)
 	return ret;
 }
 
-#define MAC2STR_BUFFERS 4
-
-static inline char *mac2str(u8 *mac)
-{
-	static atomic_t a = ATOMIC_INIT(0);
-	static char bufs[MAC2STR_BUFFERS][6 * 3];
-	char *str;
-
-	str = bufs[atomic_inc_return(&a) & (MAC2STR_BUFFERS - 1)];
-	sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x",
-		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
-	return str;
-}
-
 /* LED trigger */
 static int tx_activity;
 static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -973,13 +967,13 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
 		goto exit;
 	}
 
-	at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %s res 0x%x 0x%x",
+	at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %pM res 0x%x 0x%x",
 		 wiphy_name(priv->hw->wiphy),
-		 mac2str(m->mac_addr), m->res[0], m->res[1]);
+		 m->mac_addr, m->res[0], m->res[1]);
 	for (i = 0; i < ARRAY_SIZE(m->group_addr); i++)
-		at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %s, "
+		at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %pM, "
 			 "status %d", wiphy_name(priv->hw->wiphy), i,
-			 mac2str(m->group_addr[i]), m->group_addr_status[i]);
+			 m->group_addr[i], m->group_addr_status[i]);
 exit:
 	kfree(m);
 }
@@ -1042,7 +1036,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
 	at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
 		 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
 		 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
-		 "current_bssid %s current_essid %s current_bss_type %d "
+		 "current_bssid %pM current_essid %s current_bss_type %d "
 		 "pm_mode %d ibss_change %d res %d "
 		 "multi_domain_capability_implemented %d "
 		 "international_roaming %d country_string %.3s",
@@ -1051,7 +1045,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
 		 le16_to_cpu(m->medium_occupancy_limit),
 		 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
 		 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
-		 m->CFP_period, mac2str(m->current_bssid),
+		 m->CFP_period, m->current_bssid,
 		 hex2str(m->current_essid, IW_ESSID_MAX_SIZE),
 		 m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
 		 m->res, m->multi_domain_capability_implemented,
@@ -1080,7 +1074,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
 		 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
 		 "scan_type %d scan_channel %d probe_delay %u "
 		 "min_channel_time %d max_channel_time %d listen_int %d "
-		 "desired_ssid %s desired_bssid %s desired_bsstype %d",
+		 "desired_ssid %s desired_bssid %pM desired_bsstype %d",
 		 wiphy_name(priv->hw->wiphy),
 		 le32_to_cpu(m->max_tx_msdu_lifetime),
 		 le32_to_cpu(m->max_rx_lifetime),
@@ -1092,7 +1086,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
 		 le16_to_cpu(m->max_channel_time),
 		 le16_to_cpu(m->listen_interval),
 		 hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE),
-		 mac2str(m->desired_bssid), m->desired_bsstype);
+		 m->desired_bssid, m->desired_bsstype);
 exit:
 	kfree(m);
 }
@@ -1194,6 +1188,9 @@ static int at76_start_monitor(struct at76_priv *priv)
 	scan.channel = priv->channel;
 	scan.scan_type = SCAN_TYPE_PASSIVE;
 	scan.international_scan = 0;
+	scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
+	scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
+	scan.probe_delay = cpu_to_le16(0);
 
 	ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
 	if (ret >= 0)
@@ -2217,6 +2214,8 @@ static struct ieee80211_supported_band at76_supported_band = {
 static int at76_init_new_device(struct at76_priv *priv,
 				struct usb_interface *interface)
 {
+	struct wiphy *wiphy;
+	size_t len;
 	int ret;
 
 	/* set up the endpoint information */
@@ -2254,6 +2253,7 @@ static int at76_init_new_device(struct at76_priv *priv,
 	priv->device_unplugged = 0;
 
 	/* mac80211 initialisation */
+	wiphy = priv->hw->wiphy;
 	priv->hw->wiphy->max_scan_ssids = 1;
 	priv->hw->wiphy->max_scan_ie_len = 0;
 	priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
@@ -2265,6 +2265,13 @@ static int at76_init_new_device(struct at76_priv *priv,
 	SET_IEEE80211_DEV(priv->hw, &interface->dev);
 	SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
 
+	len = sizeof(wiphy->fw_version);
+	snprintf(wiphy->fw_version, len, "%d.%d.%d-%d",
+		 priv->fw_version.major, priv->fw_version.minor,
+		 priv->fw_version.patch, priv->fw_version.build);
+
+	wiphy->hw_version = priv->board_type;
+
 	ret = ieee80211_register_hw(priv->hw);
 	if (ret) {
 		printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n",
@@ -2274,9 +2281,9 @@ static int at76_init_new_device(struct at76_priv *priv,
 
 	priv->mac80211_registered = 1;
 
-	printk(KERN_INFO "%s: USB %s, MAC %s, firmware %d.%d.%d-%d\n",
+	printk(KERN_INFO "%s: USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
 	       wiphy_name(priv->hw->wiphy),
-	       dev_name(&interface->dev), mac2str(priv->mac_addr),
+	       dev_name(&interface->dev), priv->mac_addr,
 	       priv->fw_version.major, priv->fw_version.minor,
 	       priv->fw_version.patch, priv->fw_version.build);
 	printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n",
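
The at76c50x changes above drop the driver's mac2str() helper (a rotating pool of static buffers) in favor of the kernel's %pM printk extension, which formats the six bytes behind the pointer as a colon-separated MAC address. A minimal usage sketch:

#include <linux/kernel.h>
#include <linux/types.h>

/* mac must point at 6 bytes; %pM expands to aa:bb:cc:dd:ee:ff */
static void print_mac(const u8 *mac)
{
	printk(KERN_INFO "station %pM associated\n", mac);
}
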
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 11ded150b932..4e7a7fd695c8 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,6 +1,5 @@
 menuconfig ATH_COMMON
 	tristate "Atheros Wireless Cards"
-	depends on WLAN_80211
 	depends on CFG80211
 	---help---
 	  This will enable the support for the Atheros wireless drivers.
@@ -16,7 +15,15 @@ menuconfig ATH_COMMON
 	  http://wireless.kernel.org/en/users/Drivers/Atheros
 
 if ATH_COMMON
+
+config ATH_DEBUG
+	bool "Atheros wireless debugging"
+	---help---
+	  Say Y, if you want to debug atheros wireless drivers.
+	  Right now only ath9k makes use of this.
+
 source "drivers/net/wireless/ath/ath5k/Kconfig"
 source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/ar9170/Kconfig"
+
 endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 4bb0132ada37..8113a5042afa 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,11 @@
 obj-$(CONFIG_ATH5K)		+= ath5k/
-obj-$(CONFIG_ATH9K)		+= ath9k/
+obj-$(CONFIG_ATH9K_HW)		+= ath9k/
 obj-$(CONFIG_AR9170_USB)        += ar9170/
 
 obj-$(CONFIG_ATH_COMMON)	+= ath.o
-ath-objs 		:= main.o regd.o
+
+ath-objs :=	main.o \
+		regd.o \
+		hw.o
+
+ath-$(CONFIG_ATH_DEBUG) += debug.o
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index 05918f1e685a..d7a4799d20fb 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,6 +1,6 @@
 config AR9170_USB
 	tristate "Atheros AR9170 802.11n USB support"
-	depends on USB && MAC80211 && WLAN_80211
+	depends on USB && MAC80211
 	select FW_LOADER
 	help
 	  This is a driver for the Atheros "otus" 802.11n USB devices.
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 914e4718a9a8..9f9459860d82 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -172,8 +172,6 @@ struct ar9170 {
 
 	/* interface mode settings */
 	struct ieee80211_vif *vif;
-	u8 mac_addr[ETH_ALEN];
-	u8 bssid[ETH_ALEN];
 
 	/* beaconing */
 	struct sk_buff *beacon;
@@ -204,6 +202,8 @@ struct ar9170 {
 	u8 power_2G_ht20[8];
 	u8 power_2G_ht40[8];
 
+	u8 phy_heavy_clip;
+
 #ifdef CONFIG_AR9170_LEDS
 	struct delayed_work led_work;
 	struct ar9170_led leds[AR9170_NUM_LEDS];
@@ -231,7 +231,7 @@ struct ar9170 {
 	struct sk_buff_head tx_status_ampdu;
 	spinlock_t tx_ampdu_list_lock;
 	struct list_head tx_ampdu_list;
-	unsigned int tx_ampdu_pending;
+	atomic_t tx_ampdu_pending;
 
 	/* rxstream mpdu merge */
 	struct ar9170_rxstream_mpdu_merge rx_mpdu;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index f57a6200167b..cf6f5c4174a6 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -72,8 +72,7 @@ int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
 	return err;
 }
 
-static int ar9170_read_mreg(struct ar9170 *ar, int nregs,
-			    const u32 *regs, u32 *out)
+int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out)
 {
 	int i, err;
 	__le32 *offs, *res;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index a4f0e50e52b4..826c45e6b274 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -44,6 +44,7 @@
 int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
 int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
 int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
+int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out);
 int ar9170_echo_test(struct ar9170 *ar, u32 v);
 
 /*
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 6cbfb2f83391..701ddb7d8400 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -152,14 +152,14 @@ enum ar9170_cmd {
 #define		AR9170_MAC_REG_FTF_BIT14		BIT(14)
 #define		AR9170_MAC_REG_FTF_BIT15		BIT(15)
 #define		AR9170_MAC_REG_FTF_BAR			BIT(24)
-#define		AR9170_MAC_REG_FTF_BIT25		BIT(25)
+#define		AR9170_MAC_REG_FTF_BA			BIT(25)
 #define		AR9170_MAC_REG_FTF_PSPOLL		BIT(26)
 #define		AR9170_MAC_REG_FTF_RTS			BIT(27)
 #define		AR9170_MAC_REG_FTF_CTS			BIT(28)
 #define		AR9170_MAC_REG_FTF_ACK			BIT(29)
 #define		AR9170_MAC_REG_FTF_CFE			BIT(30)
 #define		AR9170_MAC_REG_FTF_CFE_ACK		BIT(31)
-#define		AR9170_MAC_REG_FTF_DEFAULTS		0x0500ffff
+#define		AR9170_MAC_REG_FTF_DEFAULTS		0x0700ffff
 #define		AR9170_MAC_REG_FTF_MONITOR		0xfd00ffff
 
 #define AR9170_MAC_REG_RX_TOTAL			(AR9170_MAC_REG_BASE + 0x6A0)
@@ -311,6 +311,8 @@ struct ar9170_tx_control {
 
 #define AR9170_TX_PHY_SHORT_GI			0x80000000
 
+#define AR5416_MAX_RATE_POWER                   63
+
 struct ar9170_rx_head {
 	u8 plcp[12];
 } __packed;
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index 614e3218a2bc..ddc8c09dc79e 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -35,6 +35,9 @@
  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+
+#include <asm/unaligned.h>
+
 #include "ar9170.h"
 #include "cmd.h"
 
@@ -227,11 +230,8 @@ static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
 
 	ar9170_regwrite_begin(ar);
 
-	ar9170_regwrite(reg,
-			(mac[3] << 24) | (mac[2] << 16) |
-			(mac[1] << 8) | mac[0]);
-
-	ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
+	ar9170_regwrite(reg, get_unaligned_le32(mac));
+	ar9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
 
 	ar9170_regwrite_finish();
 
@@ -311,13 +311,14 @@ static int ar9170_set_promiscouous(struct ar9170 *ar)
 
 int ar9170_set_operating_mode(struct ar9170 *ar)
 {
+	struct ath_common *common = &ar->common;
 	u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
 	u8 *mac_addr, *bssid;
 	int err;
 
 	if (ar->vif) {
-		mac_addr = ar->mac_addr;
-		bssid = ar->bssid;
+		mac_addr = common->macaddr;
+		bssid = common->curbssid;
 
 		switch (ar->vif->type) {
 		case NL80211_IFTYPE_MESH_POINT:
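
ar9170_set_mac_reg() above replaces the open-coded byte shifting with get_unaligned_le32()/get_unaligned_le16(), which load a little-endian value from a buffer that may not be naturally aligned. A minimal sketch of packing a MAC address into two registers this way; write_reg() and the offsets are hypothetical:

#include <asm/unaligned.h>
#include <linux/types.h>

static void write_reg(u32 reg, u32 val)
{
	/* bus access elided in this sketch */
}

static void set_mac_regs(const u8 mac[6])
{
	write_reg(0x100, get_unaligned_le32(mac));	/* bytes 0..3 */
	write_reg(0x104, get_unaligned_le16(mac + 4));	/* bytes 4..5 */
}
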
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c1f8c69db165..f9d6db8d013e 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -414,9 +414,9 @@ static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
 
 	skb_queue_tail(&ar->tx_status_ampdu, skb);
 	ar9170_tx_fake_ampdu_status(ar);
-	ar->tx_ampdu_pending--;
 
-	if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending)
+	if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
+	    !list_empty(&ar->tx_ampdu_list))
 		ar9170_tx_ampdu(ar);
 }
 
@@ -850,6 +850,7 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
 		}
 		break;
 
+	case AR9170_RX_STATUS_MODULATION_DUPOFDM:
 	case AR9170_RX_STATUS_MODULATION_OFDM:
 		switch (head->plcp[0] & 0xf) {
 		case 0xb:
@@ -897,8 +898,7 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
 		status->flag |= RX_FLAG_HT;
 		break;
 
-	case AR9170_RX_STATUS_MODULATION_DUPOFDM:
-		/* XXX */
+	default:
 		if (ar9170_nag_limiter(ar))
 			printk(KERN_ERR "%s: invalid modulation\n",
 			       wiphy_name(ar->hw->wiphy));
@@ -1248,6 +1248,7 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
 	ar->global_ampdu_density = 6;
 	ar->global_ampdu_factor = 3;
 
+	atomic_set(&ar->tx_ampdu_pending, 0);
 	ar->bad_hw_nagger = jiffies;
 
 	err = ar->open(ar);
@@ -1773,7 +1774,7 @@ static void ar9170_tx(struct ar9170 *ar)
 					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
 
 			if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
-				ar->tx_ampdu_pending++;
+				atomic_inc(&ar->tx_ampdu_pending);
 
 #ifdef AR9170_QUEUE_DEBUG
 			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
@@ -1784,7 +1785,7 @@ static void ar9170_tx(struct ar9170 *ar)
 			err = ar->tx(ar, skb);
 			if (unlikely(err)) {
 				if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
-					ar->tx_ampdu_pending--;
+					atomic_dec(&ar->tx_ampdu_pending);
 
 				frames_failed++;
 				dev_kfree_skb_any(skb);
@@ -1931,7 +1932,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 		bool run = ar9170_tx_ampdu_queue(ar, skb);
 
-		if (run || !ar->tx_ampdu_pending)
+		if (run || !atomic_read(&ar->tx_ampdu_pending))
 			ar9170_tx_ampdu(ar);
 	} else {
 		unsigned int queue = skb_get_queue_mapping(skb);
@@ -1952,6 +1953,7 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
 				   struct ieee80211_if_init_conf *conf)
 {
 	struct ar9170 *ar = hw->priv;
+	struct ath_common *common = &ar->common;
 	int err = 0;
 
 	mutex_lock(&ar->mutex);
@@ -1962,7 +1964,7 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
 	}
 
 	ar->vif = conf->vif;
-	memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);
+	memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
 
 	if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
 		ar->rx_software_decryption = true;
@@ -2131,12 +2133,13 @@ static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
 				       u32 changed)
 {
 	struct ar9170 *ar = hw->priv;
+	struct ath_common *common = &ar->common;
 	int err = 0;
 
 	mutex_lock(&ar->mutex);
 
 	if (changed & BSS_CHANGED_BSSID) {
-		memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN);
+		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
 		err = ar9170_set_operating_mode(ar);
 		if (err)
 			goto out;
@@ -2190,22 +2193,30 @@ static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
 {
 	struct ar9170 *ar = hw->priv;
 	int err;
-	u32 tsf_low;
-	u32 tsf_high;
 	u64 tsf;
+#define NR 3
+	static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
+				    AR9170_MAC_REG_TSF_L,
+				    AR9170_MAC_REG_TSF_H };
+	u32 val[NR];
+	int loops = 0;
 
 	mutex_lock(&ar->mutex);
-	err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
-	if (!err)
-		err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
+
+	while (loops++ < 10) {
+		err = ar9170_read_mreg(ar, NR, addr, val);
+		if (err || val[0] == val[2])
+			break;
+	}
+
 	mutex_unlock(&ar->mutex);
 
 	if (WARN_ON(err))
 		return 0;
-
-	tsf = tsf_high;
-	tsf = (tsf << 32) | tsf_low;
+	tsf = val[0];
+	tsf = (tsf << 32) | val[1];
 	return tsf;
+#undef NR
 }
 
 static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2430,6 +2441,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
 }
 
 static int ar9170_ampdu_action(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
 			       enum ieee80211_ampdu_mlme_action action,
 			       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
@@ -2459,7 +2471,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
 		tid_info->state = AR9170_TID_STATE_PROGRESS;
 		tid_info->active = false;
 		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 
 	case IEEE80211_AMPDU_TX_STOP:
@@ -2469,7 +2481,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
 		tid_info->active = false;
 		skb_queue_purge(&tid_info->queue);
 		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
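
The get_tsf() rework above reads TSF_H, TSF_L and TSF_H again in one multi-register command and retries while the two high-word samples differ, so a low-word rollover between the reads cannot produce a torn 64-bit timestamp. A minimal sketch of the high/low/high technique; read_reg() and the register addresses are hypothetical:

#include <linux/types.h>

#define TSF_H	0x100
#define TSF_L	0x104

static u32 read_reg(u32 addr)
{
	return 0;	/* bus access elided in this sketch */
}

static u64 read_tsf64(void)
{
	u32 hi, lo, hi2;
	int loops = 0;

	do {
		hi  = read_reg(TSF_H);
		lo  = read_reg(TSF_L);
		hi2 = read_reg(TSF_H);
	} while (hi != hi2 && ++loops < 10);	/* low word wrapped: retry */

	return ((u64)hi << 32) | lo;
}
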
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index dbd488da18b1..45a415ea809a 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1239,9 +1239,6 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
 				    struct ar9170_calctl_edges edges[],
 				    u32 freq)
 {
-/* TODO: move somewhere else */
-#define AR5416_MAX_RATE_POWER        63
-
 	int i;
 	u8 rc = AR5416_MAX_RATE_POWER;
 	u8 f;
@@ -1259,10 +1256,11 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
 			break;
 		}
 		if (i > 0 && f < edges[i].channel) {
-			if (f > edges[i-1].channel &&
-			    edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
+			if (f > edges[i - 1].channel &&
+			    edges[i - 1].power_flags &
+			    AR9170_CALCTL_EDGE_FLAGS) {
 				/* lower channel has the inband flag set */
-				rc = edges[i-1].power_flags &
+				rc = edges[i - 1].power_flags &
 					~AR9170_CALCTL_EDGE_FLAGS;
 			}
 			break;
@@ -1270,18 +1268,48 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
 	}
 
 	if (i == AR5416_NUM_BAND_EDGES) {
-		if (f > edges[i-1].channel &&
-		    edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
+		if (f > edges[i - 1].channel &&
+		    edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
 			/* lower channel has the inband flag set */
-			rc = edges[i-1].power_flags &
+			rc = edges[i - 1].power_flags &
 				~AR9170_CALCTL_EDGE_FLAGS;
 		}
 	}
 	return rc;
 }
 
-/* calculate the conformance test limits and apply them to ar->power*
- * (derived from otus hal/hpmain.c, line 3706 ff.)
+static u8 ar9170_get_heavy_clip(struct ar9170 *ar,
+				struct ar9170_calctl_edges edges[],
+				u32 freq, enum ar9170_bw bw)
+{
+	u8 f;
+	int i;
+	u8 rc = 0;
+
+	if (freq < 3000)
+		f = freq - 2300;
+	else
+		f = (freq - 4800) / 5;
+
+	if (bw == AR9170_BW_40_BELOW || bw == AR9170_BW_40_ABOVE)
+		rc |= 0xf0;
+
+	for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
+		if (edges[i].channel == 0xff)
+			break;
+		if (f == edges[i].channel) {
+			if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
+				rc |= 0x0f;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * calculate the conformance test limits and the heavy clip parameter
+ * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
  */
 static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 {
@@ -1295,7 +1323,8 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 		int pwr_cal_len;
 	} *modes;
 
-	/* order is relevant in the mode_list_*: we fall back to the
+	/*
+	 * order is relevant in the mode_list_*: we fall back to the
 	 * lower indices if any mode is missed in the EEPROM.
 	 */
 	struct ctl_modes mode_list_2ghz[] = {
@@ -1313,7 +1342,10 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 
 #define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
 
-	/* TODO: investigate the differences between OTUS'
+	ar->phy_heavy_clip = 0;
+
+	/*
+	 * TODO: investigate the differences between OTUS'
 	 * hpreg.c::zfHpGetRegulatoryDomain() and
 	 * ath/regd.c::ath_regd_get_band_ctl() -
 	 * e.g. for FCC3_WORLD the OTUS procedure
@@ -1347,6 +1379,15 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 		if (ctl_idx < AR5416_NUM_CTLS) {
 			int f_off = 0;
 
+			/* determine heavy clip parameter from
+			   the 11G edges array */
+			if (modes[i].ctl_mode == CTL_11G) {
+				ar->phy_heavy_clip =
+					ar9170_get_heavy_clip(ar,
+							      EDGES(ctl_idx, 1),
+							      freq, bw);
+			}
+
 			/* adjust freq for 40MHz */
 			if (modes[i].ctl_mode == CTL_2GHT40 ||
 			    modes[i].ctl_mode == CTL_5GHT40) {
@@ -1360,13 +1401,15 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 				ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1),
 							  freq+f_off);
 
-			/* TODO: check if the regulatory max. power is
+			/*
+			 * TODO: check if the regulatory max. power is
 			 *  controlled by cfg80211 for DFS
 			 * (hpmain applies it to max_power itself for DFS freq)
 			 */
 
 		} else {
-			/* Workaround in otus driver, hpmain.c, line 3906:
+			/*
+			 * Workaround in otus driver, hpmain.c, line 3906:
 			 * if no data for 5GHT20 are found, take the
 			 * legacy 5G value.
 			 * We extend this here to fallback from any other *HT or
@@ -1390,6 +1433,19 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 						       modes[i].max_power);
 		}
 	}
+
+	if (ar->phy_heavy_clip & 0xf0) {
+		ar->power_2G_ht40[0]--;
+		ar->power_2G_ht40[1]--;
+		ar->power_2G_ht40[2]--;
+	}
+	if (ar->phy_heavy_clip & 0xf) {
+		ar->power_2G_ht20[0]++;
+		ar->power_2G_ht20[1]++;
+		ar->power_2G_ht20[2]++;
+	}
+
+
 #undef EDGES
 }
 
@@ -1499,8 +1555,6 @@ static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
 	/* calc. conformance test limits and apply to ar->power*[] */
 	ar9170_calc_ctl(ar, freq, bw);
 
-	/* TODO: (heavy clip) regulatory domain power level fine-tuning. */
-
 	/* set ACK/CTS TX power */
 	ar9170_regwrite_begin(ar);
 
@@ -1643,6 +1697,17 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
 	if (err)
 		return err;
 
+	if (ar->phy_heavy_clip) {
+		err = ar9170_write_reg(ar, 0x1c59e0,
+				       0x200 | ar->phy_heavy_clip);
+		if (err) {
+			if (ar9170_nag_limiter(ar))
+				printk(KERN_ERR "%s: failed to set "
+				       "heavy clip\n",
+				       wiphy_name(ar->hw->wiphy));
+		}
+	}
+
 	for (i = 0; i < 2; i++) {
 		ar->noise[i] = ar9170_calc_noise_dbm(
 				(le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e974e5829e1a..e0799d924057 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -68,8 +68,10 @@ static struct usb_device_id ar9170_usb_ids[] = {
 	{ USB_DEVICE(0x0cf3, 0x1002) },
 	/* Cace Airpcap NX */
 	{ USB_DEVICE(0xcace, 0x0300) },
-	/* D-Link DWA 160A */
+	/* D-Link DWA 160 A1 */
 	{ USB_DEVICE(0x07d1, 0x3c10) },
+	/* D-Link DWA 160 A2 */
+	{ USB_DEVICE(0x07d1, 0x3a09) },
 	/* Netgear WNDA3100 */
 	{ USB_DEVICE(0x0846, 0x9010) },
 	/* Netgear WN111 v2 */
@@ -108,15 +110,15 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
 		return ;
 
 	spin_lock_irqsave(&aru->tx_urb_lock, flags);
-	if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) {
+	if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
 		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
 		return ;
 	}
-	aru->tx_submitted_urbs++;
+	atomic_inc(&aru->tx_submitted_urbs);
 
 	urb = usb_get_from_anchor(&aru->tx_pending);
 	if (!urb) {
-		aru->tx_submitted_urbs--;
+		atomic_dec(&aru->tx_submitted_urbs);
 		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
 
 		return ;
@@ -133,7 +135,7 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
 				err);
 
 		usb_unanchor_urb(urb);
-		aru->tx_submitted_urbs--;
+		atomic_dec(&aru->tx_submitted_urbs);
 		ar9170_tx_callback(&aru->common, urb->context);
 	}
 
@@ -151,7 +153,7 @@ static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
 		return ;
 	}
 
-	aru->tx_submitted_urbs--;
+	atomic_dec(&aru->tx_submitted_urbs);
 
 	ar9170_tx_callback(&aru->common, skb);
 
@@ -794,7 +796,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
 	spin_lock_init(&aru->tx_urb_lock);
 
 	aru->tx_pending_urbs = 0;
-	aru->tx_submitted_urbs = 0;
+	atomic_set(&aru->tx_submitted_urbs, 0);
 
 	aru->common.stop = ar9170_usb_stop;
 	aru->common.flush = ar9170_usb_flush;
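
The ar9170 changes above also convert the tx_submitted_urbs and tx_ampdu_pending counters from plain integers to atomic_t, so URB completion callbacks can update them without widening the spinlock coverage; atomic_dec_and_test() both drops the count and reports whether it reached zero, which is what triggers the next AMPDU batch. A minimal sketch, with a hypothetical kick function:

#include <asm/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);

static void submit_one(void)
{
	atomic_inc(&pending);		/* one more URB in flight */
}

static void complete_one(void (*kick)(void))
{
	/* the last in-flight URB finished: kick the next batch */
	if (atomic_dec_and_test(&pending))
		kick();
}
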
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index d098f4d5d2f2..a2ce3b169ceb 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -67,7 +67,7 @@ struct ar9170_usb {
 	bool req_one_stage_fw;
 
 	spinlock_t tx_urb_lock;
-	unsigned int tx_submitted_urbs;
+	atomic_t tx_submitted_urbs;
 	unsigned int tx_pending_urbs;
 
 	struct completion cmd_wait;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index a63e90cbf9e5..9e05648356fe 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -18,6 +18,35 @@
 #define ATH_H
 
 #include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <net/mac80211.h>
+
+/*
+ * The key cache is used for h/w cipher state and also for
+ * tracking station state such as the current tx antenna.
+ * We also set up a mapping table between key cache slot indices
+ * and station state to short-circuit node lookups on rx.
+ * Different parts have different size key caches.  We handle
+ * up to ATH_KEYMAX entries (could dynamically allocate state).
+ */
+#define	ATH_KEYMAX	        128     /* max key cache size we handle */
+
+static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+struct ath_ani {
+	bool caldone;
+	int16_t noise_floor;
+	unsigned int longcal_timer;
+	unsigned int shortcal_timer;
+	unsigned int resetcal_timer;
+	unsigned int checkani_timer;
+	struct timer_list timer;
+};
+
+enum ath_device_state {
+	ATH_HW_UNAVAILABLE,
+	ATH_HW_INITIALIZED,
+};
 
 struct reg_dmn_pair_mapping {
 	u16 regDmnEnum;
@@ -36,13 +65,53 @@ struct ath_regulatory {
 	struct reg_dmn_pair_mapping *regpair;
 };
 
+struct ath_ops {
+	unsigned int (*read)(void *, u32 reg_offset);
+	void (*write)(void *, u32 val, u32 reg_offset);
+};
+
+struct ath_common;
+
+struct ath_bus_ops {
+	void		(*read_cachesize)(struct ath_common *common, int *csz);
+	void		(*cleanup)(struct ath_common *common);
+	bool		(*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+	void		(*bt_coex_prep)(struct ath_common *common);
+};
+
 struct ath_common {
+	void *ah;
+	void *priv;
+	struct ieee80211_hw *hw;
+	int debug_mask;
+	enum ath_device_state state;
+
+	struct ath_ani ani;
+
 	u16 cachelsz;
+	u16 curaid;
+	u8 macaddr[ETH_ALEN];
+	u8 curbssid[ETH_ALEN];
+	u8 bssidmask[ETH_ALEN];
+
+	u8 tx_chainmask;
+	u8 rx_chainmask;
+
+	u32 rx_bufsize;
+
+	u32 keymax;
+	DECLARE_BITMAP(keymap, ATH_KEYMAX);
+	u8 splitmic;
+
 	struct ath_regulatory regulatory;
+	const struct ath_ops *ops;
+	const struct ath_bus_ops *bus_ops;
 };
 
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
 				u32 len,
 				gfp_t gfp_mask);
 
+void ath_hw_setbssidmask(struct ath_common *common);
+
 #endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 06d006675d7d..eb83b7b4d0e3 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,6 +1,6 @@
 config ATH5K
 	tristate "Atheros 5xxx wireless cards support"
-	depends on PCI && MAC80211 && WLAN_80211
+	depends on PCI && MAC80211
 	select MAC80211_LEDS
 	select LEDS_CLASS
 	select NEW_LEDS
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6cd5efcec417..6a2a96761111 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -35,6 +35,7 @@
  * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
  * and clean up common bits, then introduce set/get functions in eeprom.c */
 #include "eeprom.h"
+#include "../ath.h"
 
 /* PCI IDs */
 #define PCI_DEVICE_ID_ATHEROS_AR5210 		0x0007 /* AR5210 */
@@ -165,13 +166,6 @@
 #define AR5K_INI_VAL_XR			0
 #define AR5K_INI_VAL_MAX		5
 
-/* Used for BSSID etc manipulation */
-#define AR5K_LOW_ID(_a)(				\
-(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24	\
-)
-
-#define AR5K_HIGH_ID(_a)	((_a)[4] | (_a)[5] << 8)
-
 /*
  * Some tuneable values (these should be changeable by the user)
  * TODO: Make use of them and add more options OR use debug/configfs
@@ -204,6 +198,7 @@
 #define AR5K_TUNE_CWMAX_11B			1023
 #define AR5K_TUNE_CWMAX_XR			7
 #define AR5K_TUNE_NOISE_FLOOR			-72
+#define AR5K_TUNE_CCA_MAX_GOOD_VALUE		-95
 #define AR5K_TUNE_MAX_TXPOWER			63
 #define AR5K_TUNE_DEFAULT_TXPOWER		25
 #define AR5K_TUNE_TPC_TXPOWER			false
@@ -1012,6 +1007,14 @@ struct ath5k_capabilities {
 	} cap_queues;
 };
 
+/* size of noise floor history (keep it a power of two) */
+#define ATH5K_NF_CAL_HIST_MAX	8
+struct ath5k_nfcal_hist
+{
+	s16 index;				/* current index into nfval */
+	s16 nfval[ATH5K_NF_CAL_HIST_MAX];	/* last few noise floors */
+};
+
 
 /***************************************\
   HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1027,6 +1030,7 @@ struct ath5k_capabilities {
 /* TODO: Clean up and merge with ath5k_softc */
 struct ath5k_hw {
 	u32			ah_magic;
+	struct ath_common       common;
 
 	struct ath5k_softc	*ah_sc;
 	void __iomem		*ah_iobase;
@@ -1067,14 +1071,6 @@ struct ath5k_hw {
 	u8			ah_def_ant;
 	bool			ah_software_retry;
 
-	u8			ah_sta_id[ETH_ALEN];
-
-	/* Current BSSID we are trying to assoc to / create.
-	 * This is passed by mac80211 on config_interface() and cached here for
-	 * use in resets */
-	u8			ah_bssid[ETH_ALEN];
-	u8			ah_bssid_mask[ETH_ALEN];
-
 	int			ah_gpio_npins;
 
 	struct ath5k_capabilities ah_capabilities;
@@ -1125,6 +1121,8 @@ struct ath5k_hw {
 		struct ieee80211_channel r_last_channel;
 	} ah_radar;
 
+	struct ath5k_nfcal_hist ah_nfcal_hist;
+
 	/* noise floor from last periodic calibration */
 	s32			ah_noise_floor;
 
@@ -1160,7 +1158,7 @@ struct ath5k_hw {
  */
 
 /* Attach/Detach Functions */
-extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc);
+extern int ath5k_hw_attach(struct ath5k_softc *sc);
 extern void ath5k_hw_detach(struct ath5k_hw *ah);
 
 /* LED functions */
@@ -1203,10 +1201,9 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
 /* Protocol Control Unit Functions */
 extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
 /* BSSID Functions */
-extern void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac);
 extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
-extern void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id);
-extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
+extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
+extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
 /* Receive start/stop functions */
 extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
 extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
@@ -1288,8 +1285,10 @@ extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
 extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
 extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
 /* PHY calibration */
+void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
 extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
 extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
+extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
 extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
 /* Spur mitigation */
 bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
@@ -1329,17 +1328,21 @@ static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
 	return turbo ? (clock / 80) : (clock / 40);
 }
 
-/*
- * Read from a register
- */
+static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
+{
+	return &ah->common;
+}
+
+static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
+{
+	return &(ath5k_hw_common(ah)->regulatory);
+}
+
 static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
 {
 	return ioread32(ah->ah_iobase + reg);
 }
 
-/*
- * Write to a register
- */
 static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
 {
 	iowrite32(val, ah->ah_iobase + reg);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 71a1bd254517..42284445b75e 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -101,25 +101,15 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
  * -ENODEV if the device is not supported or prints an error msg if something
  * else went wrong.
  */
-struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
+int ath5k_hw_attach(struct ath5k_softc *sc)
 {
-	struct ath5k_hw *ah;
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	struct pci_dev *pdev = sc->pdev;
 	struct ath5k_eeprom_info *ee;
 	int ret;
 	u32 srev;
 
-	/*If we passed the test malloc a ath5k_hw struct*/
-	ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
-	if (ah == NULL) {
-		ret = -ENOMEM;
-		ATH5K_ERR(sc, "out of memory\n");
-		goto err;
-	}
-
-	ah->ah_sc = sc;
-	ah->ah_iobase = sc->iobase;
-
 	/*
 	 * HW information
 	 */
@@ -278,12 +268,12 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
 		goto err_free;
 	}
 
+	ee = &ah->ah_capabilities.cap_eeprom;
+
 	/*
 	 * Write PCI-E power save settings
 	 */
 	if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
-		struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-
 		ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
 		ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
 
@@ -321,7 +311,6 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
 	}
 
 	/* Crypto settings */
-	ee = &ah->ah_capabilities.cap_eeprom;
 	ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 &&
 		(ee->ee_version >= AR5K_EEPROM_VERSION_5_0 &&
 		 !AR5K_EEPROM_AES_DIS(ee->ee_misc5));
@@ -336,20 +325,21 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
 	ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){});
 
 	/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
-	memset(ah->ah_bssid, 0xff, ETH_ALEN);
-	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
+	memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
+	ath5k_hw_set_associd(ah);
 	ath5k_hw_set_opmode(ah);
 
 	ath5k_hw_rfgain_opt_init(ah);
 
+	ath5k_hw_init_nfcal_hist(ah);
+
 	/* turn on HW LEDs */
 	ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
 
-	return ah;
+	return 0;
 err_free:
 	kfree(ah);
-err:
-	return ERR_PTR(ret);
+	return ret;
 }
 
 /**
@@ -369,5 +359,4 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
 	ath5k_eeprom_detach(ah);
 
 	/* assume interrupts are down */
-	kfree(ah);
 }
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 95a8e232b58f..a4c086f069b1 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -195,12 +195,13 @@ static int __devinit	ath5k_pci_probe(struct pci_dev *pdev,
 				const struct pci_device_id *id);
 static void __devexit	ath5k_pci_remove(struct pci_dev *pdev);
 #ifdef CONFIG_PM
-static int		ath5k_pci_suspend(struct pci_dev *pdev,
-					pm_message_t state);
-static int		ath5k_pci_resume(struct pci_dev *pdev);
+static int		ath5k_pci_suspend(struct device *dev);
+static int		ath5k_pci_resume(struct device *dev);
+
+SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+#define ATH5K_PM_OPS	(&ath5k_pm_ops)
 #else
-#define ath5k_pci_suspend NULL
-#define ath5k_pci_resume NULL
+#define ATH5K_PM_OPS	NULL
 #endif /* CONFIG_PM */
 
 static struct pci_driver ath5k_pci_driver = {
@@ -208,8 +209,7 @@ static struct pci_driver ath5k_pci_driver = {
 	.id_table	= ath5k_pci_id_table,
 	.probe		= ath5k_pci_probe,
 	.remove		= __devexit_p(ath5k_pci_remove),
-	.suspend	= ath5k_pci_suspend,
-	.resume		= ath5k_pci_resume,
+	.driver.pm	= ATH5K_PM_OPS,
 };
 
 
@@ -323,10 +323,13 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
 static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
+
 	BUG_ON(!bf);
 	if (!bf->skb)
 		return;
-	pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+	pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
 			PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
@@ -437,6 +440,22 @@ ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
 
 	return name;
 }
+static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
+{
+	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
+	return ath5k_hw_reg_read(ah, reg_offset);
+}
+
+static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
+	ath5k_hw_reg_write(ah, val, reg_offset);
+}
+
+static const struct ath_ops ath5k_common_ops = {
+	.read = ath5k_ioread32,
+	.write = ath5k_iowrite32,
+};
 
 static int __devinit
 ath5k_pci_probe(struct pci_dev *pdev,
@@ -444,6 +463,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
 {
 	void __iomem *mem;
 	struct ath5k_softc *sc;
+	struct ath_common *common;
 	struct ieee80211_hw *hw;
 	int ret;
 	u8 csz;
@@ -547,7 +567,6 @@ ath5k_pci_probe(struct pci_dev *pdev,
 	__set_bit(ATH_STAT_INVALID, sc->status);
 
 	sc->iobase = mem; /* So we can unmap it on detach */
-	sc->common.cachelsz = csz << 2; /* convert to bytes */
 	sc->opmode = NL80211_IFTYPE_STATION;
 	sc->bintval = 1000;
 	mutex_init(&sc->lock);
@@ -565,13 +584,28 @@ ath5k_pci_probe(struct pci_dev *pdev,
 		goto err_free;
 	}
 
-	/* Initialize device */
-	sc->ah = ath5k_hw_attach(sc);
-	if (IS_ERR(sc->ah)) {
-		ret = PTR_ERR(sc->ah);
+	/*If we passed the test malloc a ath5k_hw struct*/
+	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
+	if (!sc->ah) {
+		ret = -ENOMEM;
+		ATH5K_ERR(sc, "out of memory\n");
 		goto err_irq;
 	}
 
+	sc->ah->ah_sc = sc;
+	sc->ah->ah_iobase = sc->iobase;
+	common = ath5k_hw_common(sc->ah);
+	common->ops = &ath5k_common_ops;
+	common->ah = sc->ah;
+	common->hw = hw;
+	common->cachelsz = csz << 2; /* convert to bytes */
+
+	/* Initialize device */
+	ret = ath5k_hw_attach(sc);
+	if (ret) {
+		goto err_free_ah;
+	}
+
 	/* set up multi-rate retry capabilities */
 	if (sc->ah->ah_version == AR5K_AR5212) {
 		hw->max_rates = 4;
@@ -640,6 +674,8 @@ err_ah:
 	ath5k_hw_detach(sc->ah);
 err_irq:
 	free_irq(pdev->irq, sc);
+err_free_ah:
+	kfree(sc->ah);
 err_free:
 	ieee80211_free_hw(hw);
 err_map:
@@ -661,6 +697,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
 	ath5k_debug_finish_device(sc);
 	ath5k_detach(pdev, hw);
 	ath5k_hw_detach(sc->ah);
+	kfree(sc->ah);
 	free_irq(pdev->irq, sc);
 	pci_iounmap(pdev, sc->iobase);
 	pci_release_region(pdev, 0);
@@ -669,33 +706,20 @@ ath5k_pci_remove(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PM
-static int
-ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ath5k_pci_suspend(struct device *dev)
 {
-	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev));
 	struct ath5k_softc *sc = hw->priv;
 
 	ath5k_led_off(sc);
-
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, PCI_D3hot);
-
 	return 0;
 }
 
-static int
-ath5k_pci_resume(struct pci_dev *pdev)
+static int ath5k_pci_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
 	struct ath5k_softc *sc = hw->priv;
-	int err;
-
-	pci_restore_state(pdev);
-
-	err = pci_enable_device(pdev);
-	if (err)
-		return err;
 
 	/*
 	 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -718,7 +742,7 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
 {
 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 	struct ath5k_softc *sc = hw->priv;
-	struct ath_regulatory *regulatory = &sc->common.regulatory;
+	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
 
 	return ath_reg_notifier_apply(wiphy, request, regulatory);
 }
@@ -728,7 +752,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 {
 	struct ath5k_softc *sc = hw->priv;
 	struct ath5k_hw *ah = sc->ah;
-	struct ath_regulatory *regulatory = &sc->common.regulatory;
+	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
 	u8 mac[ETH_ALEN] = {};
 	int ret;
 
@@ -815,7 +839,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 
 	SET_IEEE80211_PERM_ADDR(hw, mac);
 	/* All MAC address bits matter for ACKs */
-	memset(sc->bssidmask, 0xff, ETH_ALEN);
+	memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
 	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
 
 	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
@@ -1152,24 +1176,26 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
 static
 struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
 {
+	struct ath_common *common = ath5k_hw_common(sc->ah);
 	struct sk_buff *skb;
 
 	/*
 	 * Allocate buffer with headroom_needed space for the
 	 * fake physical layer header at the start.
 	 */
-	skb = ath_rxbuf_alloc(&sc->common,
-			      sc->rxbufsize + sc->common.cachelsz - 1,
+	skb = ath_rxbuf_alloc(common,
+			      common->rx_bufsize,
 			      GFP_ATOMIC);
 
 	if (!skb) {
 		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
-				sc->rxbufsize + sc->common.cachelsz - 1);
+				common->rx_bufsize);
 		return NULL;
 	}
 
 	*skb_addr = pci_map_single(sc->pdev,
-		skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
+				   skb->data, common->rx_bufsize,
+				   PCI_DMA_FROMDEVICE);
 	if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
 		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
 		dev_kfree_skb(skb);
@@ -1605,13 +1631,14 @@ static int
 ath5k_rx_start(struct ath5k_softc *sc)
 {
 	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	struct ath5k_buf *bf;
 	int ret;
 
-	sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->common.cachelsz);
+	common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
 
-	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
-		sc->common.cachelsz, sc->rxbufsize);
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+		  common->cachelsz, common->rx_bufsize);
 
 	spin_lock_bh(&sc->rxbuflock);
 	sc->rxlink = NULL;
@@ -1656,6 +1683,8 @@ static unsigned int
 ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
 		struct sk_buff *skb, struct ath5k_rx_status *rs)
 {
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int keyix, hlen;
 
@@ -1672,7 +1701,7 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
 	    skb->len >= hlen + 4) {
 		keyix = skb->data[hlen + 3] >> 6;
 
-		if (test_bit(keyix, sc->keymap))
+		if (test_bit(keyix, common->keymap))
 			return RX_FLAG_DECRYPTED;
 	}
 
@@ -1684,13 +1713,14 @@ static void
 ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
 		     struct ieee80211_rx_status *rxs)
 {
+	struct ath_common *common = ath5k_hw_common(sc->ah);
 	u64 tsf, bc_tstamp;
 	u32 hw_tu;
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 
 	if (ieee80211_is_beacon(mgmt->frame_control) &&
 	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
-	    memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) {
+	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
 		/*
 		 * Received an IBSS beacon with the same BSSID. Hardware *must*
 		 * have updated the local TSF. We have to work around various
@@ -1745,6 +1775,8 @@ ath5k_tasklet_rx(unsigned long data)
 	struct sk_buff *skb, *next_skb;
 	dma_addr_t next_skb_addr;
 	struct ath5k_softc *sc = (void *)data;
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	struct ath5k_buf *bf;
 	struct ath5k_desc *ds;
 	int ret;
@@ -1822,7 +1854,7 @@ accept:
 		if (!next_skb)
 			goto next;
 
-		pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+		pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
 				PCI_DMA_FROMDEVICE);
 		skb_put(skb, rs.rs_datalen);
 
@@ -3008,6 +3040,8 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	      struct ieee80211_key_conf *key)
 {
 	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	int ret = 0;
 
 	if (modparam_nohwcrypt)
@@ -3040,14 +3074,14 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 			ATH5K_ERR(sc, "can't set the key\n");
 			goto unlock;
 		}
-		__set_bit(key->keyidx, sc->keymap);
+		__set_bit(key->keyidx, common->keymap);
 		key->hw_key_idx = key->keyidx;
 		key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV |
 			       IEEE80211_KEY_FLAG_GENERATE_MMIC);
 		break;
 	case DISABLE_KEY:
 		ath5k_hw_reset_key(sc->ah, key->keyidx);
-		__clear_bit(key->keyidx, sc->keymap);
+		__clear_bit(key->keyidx, common->keymap);
 		break;
 	default:
 		ret = -EINVAL;
@@ -3176,6 +3210,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
 {
 	struct ath5k_softc *sc = hw->priv;
 	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	unsigned long flags;
 
 	mutex_lock(&sc->lock);
@@ -3184,10 +3219,9 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
 
 	if (changes & BSS_CHANGED_BSSID) {
 		/* Cache for later use during resets */
-		memcpy(ah->ah_bssid, bss_conf->bssid, ETH_ALEN);
-		/* XXX: assoc id is set to 0 for now, mac80211 doesn't have
-		 * a clean way of letting us retrieve this yet. */
-		ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
+		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+		common->curaid = 0;
+		ath5k_hw_set_associd(ah);
 		mmiowb();
 	}
 
@@ -3200,6 +3234,14 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
 			set_beacon_filter(hw, sc->assoc);
 		ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
 			AR5K_LED_ASSOC : AR5K_LED_INIT);
+		if (bss_conf->assoc) {
+			ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+				  "Bss Info ASSOC %d, bssid: %pM\n",
+				  bss_conf->aid, common->curbssid);
+			common->curaid = bss_conf->aid;
+			ath5k_hw_set_associd(ah);
+			/* Once ANI is available you would start it here */
+		}
 	}
 
 	if (changes & BSS_CHANGED_BEACON) {
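
The suspend/resume conversion above moves ath5k to dev_pm_ops: with SIMPLE_DEV_PM_OPS the PCI core saves and restores configuration space and power state itself, so the driver callbacks shrink to device-specific work. A minimal sketch of the same pattern for a hypothetical driver (the foo_* names are illustrative only):

	static int foo_suspend(struct device *dev)
	{
		/* device-specific quiesce only; the PCI core already does
		 * pci_save_state()/pci_set_power_state() for us */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* device-specific re-init only */
		return 0;
	}

	SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		/* .name, .id_table, .probe, .remove as usual */
		.driver.pm	= &foo_pm_ops,
	};
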
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index a28c42f32c9d..b72338c9bde7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -115,7 +115,6 @@ struct ath5k_rfkill {
  * associated with an instance of a device */
 struct ath5k_softc {
 	struct pci_dev		*pdev;		/* for dma mapping */
-	struct ath_common	common;
 	void __iomem		*iobase;	/* address of the device */
 	struct mutex		lock;		/* dev-level lock */
 	struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
@@ -154,8 +153,6 @@ struct ath5k_softc {
 
 	enum ath5k_int		imask;		/* interrupt mask copy */
 
-	DECLARE_BITMAP(keymap, AR5K_KEYCACHE_SIZE); /* key use bit map */
-
 	u8			bssidmask[ETH_ALEN];
 
 	unsigned int		led_pin,	/* GPIO pin for driving LED */
@@ -202,15 +199,4 @@ struct ath5k_softc {
 #define ath5k_hw_hasveol(_ah) \
 	(ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
 
-static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
-{
-	return &ah->ah_sc->common;
-}
-
-static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
-{
-	return &(ath5k_hw_common(ah)->regulatory);
-
-}
-
 #endif
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 18eb5190ce4b..8fa439308828 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -560,8 +560,8 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
 	{ AR5K_SLEEP0,		0x0002aaaa },
 	{ AR5K_SLEEP1,		0x02005555 },
 	{ AR5K_SLEEP2,		0x00000000 },
-	{ AR5K_BSS_IDM0,	0xffffffff },
-	{ AR5K_BSS_IDM1,	0x0000ffff },
+	{ AR_BSSMSKL,		0xffffffff },
+	{ AR_BSSMSKU,		0x0000ffff },
 	{ AR5K_TXPC,		0x00000000 },
 	{ AR5K_PROFCNT_TX,	0x00000000 },
 	{ AR5K_PROFCNT_RX,	0x00000000 },
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index b548c8eaaae1..d495890355d9 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -59,6 +59,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
 	{ ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) },
 	/* Acer Aspire One A150 (maximlevitsky@gmail.com) */
 	{ ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) },
+	/* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */
+	{ ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) },
 	/* Acer Ferrari 5000 (russ.dill@gmail.com) */
 	{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
 	/* E-machines E510 (tuliom@gmail.com) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 2942f13c9c4a..64fc1eb9b6d9 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -24,6 +24,8 @@
 * Protocol Control Unit Functions *
 \*********************************/
 
+#include <asm/unaligned.h>
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -44,6 +46,7 @@
  */
 int ath5k_hw_set_opmode(struct ath5k_hw *ah)
 {
+	struct ath_common *common = ath5k_hw_common(ah);
 	u32 pcu_reg, beacon_reg, low_id, high_id;
 
 
@@ -95,8 +98,8 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
 	/*
 	 * Set PCU registers
 	 */
-	low_id = AR5K_LOW_ID(ah->ah_sta_id);
-	high_id = AR5K_HIGH_ID(ah->ah_sta_id);
+	low_id = get_unaligned_le32(common->macaddr);
+	high_id = get_unaligned_le16(common->macaddr + 4);
 	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
 	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
 
@@ -238,28 +241,6 @@ int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 	return 0;
 }
 
-
-/****************\
-* BSSID handling *
-\****************/
-
-/**
- * ath5k_hw_get_lladdr - Get station id
- *
- * @ah: The &struct ath5k_hw
- * @mac: The card's mac address
- *
- * Initialize ah->ah_sta_id using the mac address provided
- * (just a memcpy).
- *
- * TODO: Remove it once we merge ath5k_softc and ath5k_hw
- */
-void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	memcpy(mac, ah->ah_sta_id, ETH_ALEN);
-}
-
 /**
  * ath5k_hw_set_lladdr - Set station id
  *
@@ -270,17 +251,18 @@ void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
  */
 int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 {
+	struct ath_common *common = ath5k_hw_common(ah);
 	u32 low_id, high_id;
 	u32 pcu_reg;
 
 	ATH5K_TRACE(ah->ah_sc);
 	/* Set new station ID */
-	memcpy(ah->ah_sta_id, mac, ETH_ALEN);
+	memcpy(common->macaddr, mac, ETH_ALEN);
 
 	pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
 
-	low_id = AR5K_LOW_ID(mac);
-	high_id = AR5K_HIGH_ID(mac);
+	low_id = get_unaligned_le32(mac);
+	high_id = get_unaligned_le16(mac + 4);
 
 	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
 	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
@@ -297,159 +279,51 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
  *
  * Sets the BSSID which triggers the "SME Join" operation
  */
-void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
+void ath5k_hw_set_associd(struct ath5k_hw *ah)
 {
-	u32 low_id, high_id;
+	struct ath_common *common = ath5k_hw_common(ah);
 	u16 tim_offset = 0;
 
 	/*
 	 * Set simple BSSID mask on 5212
 	 */
-	if (ah->ah_version == AR5K_AR5212) {
-		ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_bssid_mask),
-							AR5K_BSS_IDM0);
-		ath5k_hw_reg_write(ah, AR5K_HIGH_ID(ah->ah_bssid_mask),
-							AR5K_BSS_IDM1);
-	}
+	if (ah->ah_version == AR5K_AR5212)
+		ath_hw_setbssidmask(common);
 
 	/*
 	 * Set BSSID which triggers the "SME Join" operation
 	 */
-	low_id = AR5K_LOW_ID(bssid);
-	high_id = AR5K_HIGH_ID(bssid);
-	ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
-	ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
-				AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);
-
-	if (assoc_id == 0) {
+	ath5k_hw_reg_write(ah,
+			   get_unaligned_le32(common->curbssid),
+			   AR5K_BSS_ID0);
+	ath5k_hw_reg_write(ah,
+			   get_unaligned_le16(common->curbssid + 4) |
+			   ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
+			   AR5K_BSS_ID1);
+
+	if (common->curaid == 0) {
 		ath5k_hw_disable_pspoll(ah);
 		return;
 	}
 
 	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
-			tim_offset ? tim_offset + 4 : 0);
+			    tim_offset ? tim_offset + 4 : 0);
 
 	ath5k_hw_enable_pspoll(ah, NULL, 0);
 }
 
-/**
- * ath5k_hw_set_bssid_mask - filter out bssids we listen
- *
- * @ah: the &struct ath5k_hw
- * @mask: the bssid_mask, a u8 array of size ETH_ALEN
- *
- * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
- * which bits of the interface's MAC address should be looked at when trying
- * to decide which packets to ACK. In station mode and AP mode with a single
- * BSS every bit matters since we lock to only one BSS. In AP mode with
- * multiple BSSes (virtual interfaces) not every bit matters because hw must
- * accept frames for all BSSes and so we tweak some bits of our mac address
- * in order to have multiple BSSes.
- *
- * NOTE: This is a simple filter and does *not* filter out all
- * relevant frames. Some frames that are not for us might get ACKed from us
- * by PCU because they just match the mask.
- *
- * When handling multiple BSSes you can get the BSSID mask by computing the
- * set of  ~ ( MAC XOR BSSID ) for all bssids we handle.
- *
- * When you do this you are essentially computing the common bits of all your
- * BSSes. Later it is assumed the harware will "and" (&) the BSSID mask with
- * the MAC address to obtain the relevant bits and compare the result with
- * (frame's BSSID & mask) to see if they match.
- */
-/*
- * Simple example: on your card you have have two BSSes you have created with
- * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
- * There is another BSSID-03 but you are not part of it. For simplicity's sake,
- * assuming only 4 bits for a mac address and for BSSIDs you can then have:
- *
- *                  \
- * MAC:                0001 |
- * BSSID-01:   0100 | --> Belongs to us
- * BSSID-02:   1001 |
- *                  /
- * -------------------
- * BSSID-03:   0110  | --> External
- * -------------------
- *
- * Our bssid_mask would then be:
- *
- *             On loop iteration for BSSID-01:
- *             ~(0001 ^ 0100)  -> ~(0101)
- *                             ->   1010
- *             bssid_mask      =    1010
- *
- *             On loop iteration for BSSID-02:
- *             bssid_mask &= ~(0001   ^   1001)
- *             bssid_mask =   (1010)  & ~(0001 ^ 1001)
- *             bssid_mask =   (1010)  & ~(1001)
- *             bssid_mask =   (1010)  &  (0110)
- *             bssid_mask =   0010
- *
- * A bssid_mask of 0010 means "only pay attention to the second least
- * significant bit". This is because its the only bit common
- * amongst the MAC and all BSSIDs we support. To findout what the real
- * common bit is we can simply "&" the bssid_mask now with any BSSID we have
- * or our MAC address (we assume the hardware uses the MAC address).
- *
- * Now, suppose there's an incoming frame for BSSID-03:
- *
- * IFRAME-01:  0110
- *
- * An easy eye-inspeciton of this already should tell you that this frame
- * will not pass our check. This is beacuse the bssid_mask tells the
- * hardware to only look at the second least significant bit and the
- * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
- * as 1, which does not match 0.
- *
- * So with IFRAME-01 we *assume* the hardware will do:
- *
- *     allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
- *  --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
- *  --> allow = (0010) == 0000 ? 1 : 0;
- *  --> allow = 0
- *
- *  Lets now test a frame that should work:
- *
- * IFRAME-02:  0001 (we should allow)
- *
- *     allow = (0001 & 1010) == 1010
- *
- *     allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
- *  --> allow = (0001 & 0010) ==  (0010 & 0001) ? 1 :0;
- *  --> allow = (0010) == (0010)
- *  --> allow = 1
- *
- * Other examples:
- *
- * IFRAME-03:  0100 --> allowed
- * IFRAME-04:  1001 --> allowed
- * IFRAME-05:  1101 --> allowed but its not for us!!!
- *
- */
-int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
 {
-	u32 low_id, high_id;
+	struct ath_common *common = ath5k_hw_common(ah);
 	ATH5K_TRACE(ah->ah_sc);
 
 	/* Cache bssid mask so that we can restore it
 	 * on reset */
-	memcpy(ah->ah_bssid_mask, mask, ETH_ALEN);
-	if (ah->ah_version == AR5K_AR5212) {
-		low_id = AR5K_LOW_ID(mask);
-		high_id = AR5K_HIGH_ID(mask);
-
-		ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
-		ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
-
-		return 0;
-	}
-
-	return -EIO;
+	memcpy(common->bssidmask, mask, ETH_ALEN);
+	if (ah->ah_version == AR5K_AR5212)
+		ath_hw_setbssidmask(common);
 }
 
-
 /************\
 * RX Control *
 \************/
@@ -1157,14 +1031,17 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
 	 /* Invalid entry (key table overflow) */
 	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
 
-	/* MAC may be NULL if it's a broadcast key. In this case no need to
-	 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
+	/*
+	 * MAC may be NULL if it's a broadcast key. In this case there is
+	 * no need to call get_unaligned_le32()/get_unaligned_le16() as we
+	 * already know the result.
+	 */
 	if (!mac) {
 		low_id = 0xffffffff;
 		high_id = 0xffff | AR5K_KEYTABLE_VALID;
 	} else {
-		low_id = AR5K_LOW_ID(mac);
-		high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
+		low_id = get_unaligned_le32(mac);
+		high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID;
 	}
 
 	ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
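
The get_unaligned_le32()/get_unaligned_le16() calls above replace the driver-private AR5K_LOW_ID()/AR5K_HIGH_ID() macros; both express the little-endian packing of a 6-byte MAC address into the two 32-bit station-ID registers. A worked example, assuming the (made-up) address 00:11:22:33:44:55:

	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 low_id, high_id;

	low_id  = get_unaligned_le32(mac);	/* 0x33221100 */
	high_id = get_unaligned_le16(mac + 4);	/* 0x00005544 */

The same packing is used for the BSSID, the BSSID mask (via ath_hw_setbssidmask()) and the key-table MAC entries.
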
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a039f2bd732..bbfdcd5e7cb1 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1124,77 +1124,148 @@ ath5k_hw_calibration_poll(struct ath5k_hw *ah)
 		ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
 		AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
 	}
+}
 
+static int sign_extend(int val, const int nbits)
+{
+	int order = BIT(nbits-1);
+	return (val ^ order) - order;
 }
 
-/**
- * ath5k_hw_noise_floor_calibration - perform PHY noise floor calibration
- *
- * @ah: struct ath5k_hw pointer we are operating on
- * @freq: the channel frequency, just used for error logging
- *
- * This function performs a noise floor calibration of the PHY and waits for
- * it to complete. Then the noise floor value is compared to some maximum
- * noise floor we consider valid.
- *
- * Note that this is different from what the madwifi HAL does: it reads the
- * noise floor and afterwards initiates the calibration. Since the noise floor
- * calibration can take some time to finish, depending on the current channel
- * use, that avoids the occasional timeout warnings we are seeing now.
- *
- * See the following link for an Atheros patent on noise floor calibration:
- * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \
- * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7
+static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+{
+	s32 val;
+
+	val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
+	return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
+}
+
+void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+{
+	int i;
+
+	ah->ah_nfcal_hist.index = 0;
+	for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
+		ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
+}
+
+static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
+{
+	struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
+	hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1);
+	hist->nfval[hist->index] = noise_floor;
+}
+
+static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+{
+	s16 sort[ATH5K_NF_CAL_HIST_MAX];
+	s16 tmp;
+	int i, j;
+
+	memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
+	for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
+		for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
+			if (sort[j] > sort[j-1]) {
+				tmp = sort[j];
+				sort[j] = sort[j-1];
+				sort[j-1] = tmp;
+			}
+		}
+	}
+	for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
+		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+			"cal %d:%d\n", i, sort[i]);
+	}
+	return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2];
+}
+
+/*
+ * When we tell the hardware to perform a noise floor calibration
+ * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples.
  *
- * XXX: Since during noise floor calibration antennas are detached according to
- * the patent, we should stop tx queues here.
+ * The median of the values in the history is then loaded into the
+ * hardware for its own use for RSSI and CCA measurements.
  */
-int
-ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 {
-	int ret;
-	unsigned int i;
-	s32 noise_floor;
+	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+	u32 val;
+	s16 nf, threshold;
+	u8 ee_mode;
 
-	/*
-	 * Enable noise floor calibration
-	 */
-	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
-				AR5K_PHY_AGCCTL_NF);
+	/* keep last value if calibration hasn't completed */
+	if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
+		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+			"NF did not complete in calibration window\n");
 
-	ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
-			AR5K_PHY_AGCCTL_NF, 0, false);
-	if (ret) {
-		ATH5K_ERR(ah->ah_sc,
-			"noise floor calibration timeout (%uMHz)\n", freq);
-		return -EAGAIN;
+		return;
 	}
 
-	/* Wait until the noise floor is calibrated and read the value */
-	for (i = 20; i > 0; i--) {
-		mdelay(1);
-		noise_floor = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
-		noise_floor = AR5K_PHY_NF_RVAL(noise_floor);
-		if (noise_floor & AR5K_PHY_NF_ACTIVE) {
-			noise_floor = AR5K_PHY_NF_AVAL(noise_floor);
-
-			if (noise_floor <= AR5K_TUNE_NOISE_FLOOR)
-				break;
-		}
+	switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) {
+	case CHANNEL_A:
+	case CHANNEL_T:
+	case CHANNEL_XR:
+		ee_mode = AR5K_EEPROM_MODE_11A;
+		break;
+	case CHANNEL_G:
+	case CHANNEL_TG:
+		ee_mode = AR5K_EEPROM_MODE_11G;
+		break;
+	default:
+	case CHANNEL_B:
+		ee_mode = AR5K_EEPROM_MODE_11B;
+		break;
 	}
 
-	ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
-		"noise floor %d\n", noise_floor);
 
-	if (noise_floor > AR5K_TUNE_NOISE_FLOOR) {
-		ATH5K_ERR(ah->ah_sc,
-			"noise floor calibration failed (%uMHz)\n", freq);
-		return -EAGAIN;
+	/* completed NF calibration, test threshold */
+	nf = ath5k_hw_read_measured_noise_floor(ah);
+	threshold = ee->ee_noise_floor_thr[ee_mode];
+
+	if (nf > threshold) {
+		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+			"noise floor failure detected; "
+			"read %d, threshold %d\n",
+			nf, threshold);
+
+		nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
 	}
 
-	ah->ah_noise_floor = noise_floor;
+	ath5k_hw_update_nfcal_hist(ah, nf);
+	nf = ath5k_hw_get_median_noise_floor(ah);
 
-	return 0;
+	/* load noise floor (in .5 dBm) so the hardware will use it */
+	val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
+	val |= (nf * 2) & AR5K_PHY_NF_M;
+	ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
+
+	AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
+		~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
+
+	ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
+		0, false);
+
+	/*
+	 * Load a high max CCA Power value (-50 dBm in .5 dBm units)
+	 * so that we're not capped by the median we just loaded.
+	 * This will be used as the initial value for the next noise
+	 * floor calibration.
+	 */
+	val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
+	ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
+	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+		AR5K_PHY_AGCCTL_NF_EN |
+		AR5K_PHY_AGCCTL_NF_NOUPDATE |
+		AR5K_PHY_AGCCTL_NF);
+
+	ah->ah_noise_floor = nf;
+
+	ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+		"noise floor calibrated: %d\n", nf);
 }
 
 /*
@@ -1287,7 +1358,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
 		return ret;
 	}
 
-	ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+	ath5k_hw_update_noise_floor(ah);
 
 	/*
 	 * Re-enable RX/TX and beacons
@@ -1328,7 +1399,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
 	if (i_coffd == 0 || q_coffd == 0)
 		goto done;
 
-	i_coff = ((-iq_corr) / i_coffd) & 0x3f;
+	i_coff = ((-iq_corr) / i_coffd);
 
 	/* Boundary check */
 	if (i_coff > 31)
@@ -1336,7 +1407,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
 	if (i_coff < -32)
 		i_coff = -32;
 
-	q_coff = (((s32)i_pwr / q_coffd) - 128) & 0x1f;
+	q_coff = (((s32)i_pwr / q_coffd) - 128);
 
 	/* Boundary check */
 	if (q_coff > 15)
@@ -1360,7 +1431,7 @@ done:
 	 * since noise floor calibration interrupts rx path while I/Q
 	 * calibration doesn't. We don't need to run noise floor calibration
 	 * as often as I/Q calibration.*/
-	ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+	ath5k_hw_update_noise_floor(ah);
 
 	/* Initiate a gain_F calibration */
 	ath5k_hw_request_rfgain_probe(ah);
@@ -2954,8 +3025,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
 		return -EINVAL;
 	}
-	if (txpower == 0)
-		txpower = AR5K_TUNE_DEFAULT_TXPOWER;
 
 	/* Reset TX power values */
 	memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
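
The MINCCA_PWR field read in ath5k_hw_read_measured_noise_floor() is a 9-bit two's-complement value, so sign_extend() turns it back into a signed dBm reading before it enters the history. A worked example, assuming a raw field value of 0x1A6:

	/* bit 8 is the sign bit of the 9-bit field */
	int order = BIT(9 - 1);			/* 0x100 */
	int nf = (0x1A6 ^ order) - order;	/* 0x0A6 - 0x100 = -90 dBm */

The median filter then keeps a single bad sample from being loaded into the hardware: with the eight history slots initialised to AR5K_TUNE_CCA_MAX_GOOD_VALUE (-95) and one new reading of, say, -60, the sorted history is -60 followed by seven entries of -95, and sort[(8 - 1) / 2] still returns -95.
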
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index c63ea6afd96f..4cb9c5df9f46 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -35,7 +35,7 @@
  * released by Atheros and on various debug messages found on the net.
  */
 
-
+#include "../reg.h"
 
 /*====MAC DMA REGISTERS====*/
 
@@ -1650,12 +1650,6 @@
 #define AR5K_SLEEP2_DTIM_PER_S		16
 
 /*
- * BSSID mask registers
- */
-#define AR5K_BSS_IDM0			0x80e0	/* Upper bits */
-#define AR5K_BSS_IDM1			0x80e4	/* Lower bits */
-
-/*
  * TX power control (TPC) register
  *
  * XXX: PCDAC steps (0.5dbm) or DBM ?
@@ -2039,17 +2033,14 @@
 #define	AR5K_PHY_AGCCTL_NF_NOUPDATE	0x00020000	/* Don't update NF automatically */
 
 /*
- * PHY noise floor status register
+ * PHY noise floor status register (CCA = Clear Channel Assessment)
  */
 #define AR5K_PHY_NF			0x9864			/* Register address */
-#define AR5K_PHY_NF_M			0x000001ff	/* Noise floor mask */
-#define AR5K_PHY_NF_ACTIVE		0x00000100	/* Noise floor calibration still active */
-#define AR5K_PHY_NF_RVAL(_n)		(((_n) >> 19) & AR5K_PHY_NF_M)
-#define AR5K_PHY_NF_AVAL(_n)		(-((_n) ^ AR5K_PHY_NF_M) + 1)
-#define AR5K_PHY_NF_SVAL(_n)		(((_n) & AR5K_PHY_NF_M) | (1 << 9))
+#define AR5K_PHY_NF_M			0x000001ff	/* Noise floor, written to hardware in 1/2 dBm units */
+#define AR5K_PHY_NF_SVAL(_n)		(((_n) & AR5K_PHY_NF_M) | (1 << 9))
 #define	AR5K_PHY_NF_THRESH62		0x0007f000	/* Thresh62 -check ANI patent- (field) */
 #define	AR5K_PHY_NF_THRESH62_S		12
-#define	AR5K_PHY_NF_MINCCA_PWR		0x0ff80000	/* ??? */
+#define	AR5K_PHY_NF_MINCCA_PWR		0x0ff80000	/* Minimum measured noise level, read from hardware in 1 dBm units */
 #define	AR5K_PHY_NF_MINCCA_PWR_S	19
 
 /*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 34e13c700849..62954fc77869 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -25,6 +25,8 @@
   Reset functions and helpers
 \*****************************/
 
+#include <asm/unaligned.h>
+
 #include <linux/pci.h> 		/* To determine if a card is pci-e */
 #include <linux/log2.h>
 #include "ath5k.h"
@@ -870,6 +872,7 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
 int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 	struct ieee80211_channel *channel, bool change_channel)
 {
+	struct ath_common *common = ath5k_hw_common(ah);
 	u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo;
 	u32 phy_tst1;
 	u8 mode, freq, ee_mode, ant[2];
@@ -1171,10 +1174,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 	ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
 
 	/* Restore sta_id flags and preserve our mac address*/
-	ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_sta_id),
-						AR5K_STA_ID0);
-	ath5k_hw_reg_write(ah, staid1_flags | AR5K_HIGH_ID(ah->ah_sta_id),
-						AR5K_STA_ID1);
+	ath5k_hw_reg_write(ah,
+			   get_unaligned_le32(common->macaddr),
+			   AR5K_STA_ID0);
+	ath5k_hw_reg_write(ah,
+			   staid1_flags | get_unaligned_le16(common->macaddr + 4),
+			   AR5K_STA_ID1);
 
 
 	/*
@@ -1182,8 +1187,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 	 */
 
 	/* Restore bssid and bssid mask */
-	/* XXX: add ah->aid once mac80211 gives this to us */
-	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
+	ath5k_hw_set_associd(ah);
 
 	/* Set PCU config */
 	ath5k_hw_set_opmode(ah);
@@ -1289,7 +1293,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 	 * out and/or noise floor calibration might timeout.
 	 */
 	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
-				AR5K_PHY_AGCCTL_CAL);
+				AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
 
 	/* At the same time start I/Q calibration for QAM constellation
 	 * -no need for CCK- */
@@ -1310,21 +1314,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 			channel->center_freq);
 	}
 
-	/*
-	 * If we run NF calibration before AGC, it always times out.
-	 * Binary HAL starts NF and AGC calibration at the same time
-	 * and only waits for AGC to finish. Also if AGC or NF cal.
-	 * times out, reset doesn't fail on binary HAL. I believe
-	 * that's wrong because since rx path is routed to a detector,
-	 * if cal. doesn't finish we won't have RX. Sam's HAL for AR5210/5211
-	 * enables noise floor calibration after offset calibration and if noise
-	 * floor calibration fails, reset fails. I believe that's
-	 * a better approach, we just need to find a polling interval
-	 * that suits best, even if reset continues we need to make
-	 * sure that rx path is ready.
-	 */
-	ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
-
 	/* Restore antenna mode */
 	ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
 
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index ef5f59c4dd80..03a1106ad725 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -1,9 +1,16 @@
+config ATH9K_HW
+	tristate
+config ATH9K_COMMON
+	tristate
+
 config ATH9K
 	tristate "Atheros 802.11n wireless cards support"
-	depends on PCI && MAC80211 && WLAN_80211
+	depends on PCI && MAC80211
+	select ATH9K_HW
 	select MAC80211_LEDS
 	select LEDS_CLASS
 	select NEW_LEDS
+	select ATH9K_COMMON
 	---help---
 	  This module adds support for wireless adapters based on
 	  Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -16,13 +23,12 @@ config ATH9K
 
 	  If you choose to build a module, it'll be called ath9k.
 
-config ATH9K_DEBUG
+config ATH9K_DEBUGFS
 	bool "Atheros ath9k debugging"
 	depends on ATH9K
 	---help---
-	  Say Y, if you need ath9k to display debug messages.
-	  Pass the debug mask as a module parameter:
+	  Say Y if you need access to ath9k's statistics for
+	  interrupts, rate control, etc.
 
-	  modprobe ath9k debug=0x00000200
+	  Also required for changing debug message flags at run time.
 
-	  Look in ath9k/debug.h for possible debug masks
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index ff2c9a26c10c..4985b2b1b0a9 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,22 +1,28 @@
-ath9k-y +=	hw.o \
-		eeprom.o \
-		eeprom_def.o \
-		eeprom_4k.o \
-		eeprom_9287.o \
-		mac.o \
-		calib.o \
-		ani.o \
-		phy.o \
-		beacon.o \
+ath9k-y +=	beacon.o \
 		main.o \
 		recv.o \
 		xmit.o \
 		virtual.o \
-		rc.o \
-		btcoex.o
+		rc.o
 
 ath9k-$(CONFIG_PCI) += pci.o
 ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o
-ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
+
+ath9k_hw-y:=	hw.o \
+		eeprom.o \
+		eeprom_def.o \
+		eeprom_4k.o \
+		eeprom_9287.o \
+		calib.o \
+		ani.o \
+		phy.o \
+		btcoex.o \
+		mac.o
+
+obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
+
+obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
+ath9k_common-y:=	common.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2ad7d0280f7a..329e6bc137ab 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -22,27 +22,29 @@
 #include "ath9k.h"
 
 /* return bus cachesize in 4B word units */
-static void ath_ahb_read_cachesize(struct ath_softc *sc, int *csz)
+static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
 {
 	*csz = L1_CACHE_BYTES >> 2;
 }
 
-static void ath_ahb_cleanup(struct ath_softc *sc)
+static void ath_ahb_cleanup(struct ath_common *common)
 {
+	struct ath_softc *sc = (struct ath_softc *)common->priv;
 	iounmap(sc->mem);
 }
 
-static bool ath_ahb_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
+static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 {
-	struct ath_softc *sc = ah->ah_sc;
+	struct ath_softc *sc = (struct ath_softc *)common->priv;
 	struct platform_device *pdev = to_platform_device(sc->dev);
 	struct ath9k_platform_data *pdata;
 
 	pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
 	if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"%s: flash read failed, offset %08x is out of range\n",
-				__func__, off);
+		ath_print(common, ATH_DBG_FATAL,
+			  "%s: flash read failed, offset %08x "
+			  "is out of range\n",
+			  __func__, off);
 		return false;
 	}
 
@@ -67,6 +69,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	int irq;
 	int ret = 0;
 	struct ath_hw *ah;
+	char hw_name[64];
 
 	if (!pdev->dev.platform_data) {
 		dev_err(&pdev->dev, "no platform data specified\n");
@@ -116,10 +119,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	sc->hw = hw;
 	sc->dev = &pdev->dev;
 	sc->mem = mem;
-	sc->bus_ops = &ath_ahb_bus_ops;
 	sc->irq = irq;
 
-	ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0);
+	ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to initialize device\n");
 		goto err_free_hw;
@@ -132,14 +134,11 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	}
 
 	ah = sc->sc_ah;
+	ath9k_hw_name(ah, hw_name, sizeof(hw_name));
 	printk(KERN_INFO
-	       "%s: Atheros AR%s MAC/BB Rev:%x, "
-	       "AR%s RF Rev:%x, mem=0x%lx, irq=%d\n",
+	       "%s: %s mem=0x%lx, irq=%d\n",
 	       wiphy_name(hw->wiphy),
-	       ath_mac_bb_name(ah->hw_version.macVersion),
-	       ah->hw_version.macRev,
-	       ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
-	       ah->hw_version.phyRev,
+	       hw_name,
 	       (unsigned long)mem, irq);
 
 	return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2b493742ef10..2a0cd64c2bfb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
 					struct ath9k_channel *chan)
@@ -31,8 +31,8 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
 		}
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"No more channel states left. Using channel 0\n");
+	ath_print(ath9k_hw_common(ah), ATH_DBG_ANI,
+		  "No more channel states left. Using channel 0\n");
 
 	return 0;
 }
@@ -41,16 +41,17 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
 				 enum ath9k_ani_cmd cmd, int param)
 {
 	struct ar5416AniState *aniState = ah->curani;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	switch (cmd & ah->ani_function) {
 	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-				"level out of range (%u > %u)\n",
-				level,
-				(unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+			ath_print(common, ATH_DBG_ANI,
+				  "level out of range (%u > %u)\n",
+				  level,
+				  (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
 			return false;
 		}
 
@@ -152,10 +153,10 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(firstep)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-				"level out of range (%u > %u)\n",
-				level,
-				(unsigned) ARRAY_SIZE(firstep));
+			ath_print(common, ATH_DBG_ANI,
+				  "level out of range (%u > %u)\n",
+				  level,
+				  (unsigned) ARRAY_SIZE(firstep));
 			return false;
 		}
 		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
@@ -174,11 +175,10 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(cycpwrThr1)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-				"level out of range (%u > %u)\n",
-				level,
-				(unsigned)
-				ARRAY_SIZE(cycpwrThr1));
+			ath_print(common, ATH_DBG_ANI,
+				  "level out of range (%u > %u)\n",
+				  level,
+				  (unsigned) ARRAY_SIZE(cycpwrThr1));
 			return false;
 		}
 		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
@@ -194,25 +194,28 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
 	case ATH9K_ANI_PRESENT:
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"invalid cmd %u\n", cmd);
+		ath_print(common, ATH_DBG_ANI,
+			  "invalid cmd %u\n", cmd);
 		return false;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "ANI parameters:\n");
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"noiseImmunityLevel=%d, spurImmunityLevel=%d, "
-		"ofdmWeakSigDetectOff=%d\n",
-		aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
-		!aniState->ofdmWeakSigDetectOff);
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"cckWeakSigThreshold=%d, "
-		"firstepLevel=%d, listenTime=%d\n",
-		aniState->cckWeakSigThreshold, aniState->firstepLevel,
-		aniState->listenTime);
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+	ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+	ath_print(common, ATH_DBG_ANI,
+		  "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+		  "ofdmWeakSigDetectOff=%d\n",
+		  aniState->noiseImmunityLevel,
+		  aniState->spurImmunityLevel,
+		  !aniState->ofdmWeakSigDetectOff);
+	ath_print(common, ATH_DBG_ANI,
+		  "cckWeakSigThreshold=%d, "
+		  "firstepLevel=%d, listenTime=%d\n",
+		  aniState->cckWeakSigThreshold,
+		  aniState->firstepLevel,
+		  aniState->listenTime);
+	ath_print(common, ATH_DBG_ANI,
 		"cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
-		aniState->cycleCount, aniState->ofdmPhyErrCount,
+		aniState->cycleCount,
+		aniState->ofdmPhyErrCount,
 		aniState->cckPhyErrCount);
 
 	return true;
@@ -231,6 +234,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
 static void ath9k_ani_restart(struct ath_hw *ah)
 {
 	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!DO_ANI(ah))
 		return;
@@ -240,24 +244,24 @@ static void ath9k_ani_restart(struct ath_hw *ah)
 
 	if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
 		aniState->ofdmPhyErrBase = 0;
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"OFDM Trigger is too high for hw counters\n");
+		ath_print(common, ATH_DBG_ANI,
+			  "OFDM Trigger is too high for hw counters\n");
 	} else {
 		aniState->ofdmPhyErrBase =
 			AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
 	}
 	if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
 		aniState->cckPhyErrBase = 0;
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"CCK Trigger is too high for hw counters\n");
+		ath_print(common, ATH_DBG_ANI,
+			  "CCK Trigger is too high for hw counters\n");
 	} else {
 		aniState->cckPhyErrBase =
 			AR_PHY_COUNTMAX - aniState->cckTrigHigh;
 	}
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"Writing ofdmbase=%u   cckbase=%u\n",
-		aniState->ofdmPhyErrBase,
-		aniState->cckPhyErrBase);
+	ath_print(common, ATH_DBG_ANI,
+		  "Writing ofdmbase=%u   cckbase=%u\n",
+		  aniState->ofdmPhyErrBase,
+		  aniState->cckPhyErrBase);
 	REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
 	REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
 	REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
@@ -271,7 +275,7 @@ static void ath9k_ani_restart(struct ath_hw *ah)
 
 static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	struct ar5416AniState *aniState;
 	int32_t rssi;
 
@@ -343,7 +347,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
 
 static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	struct ar5416AniState *aniState;
 	int32_t rssi;
 
@@ -464,6 +468,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
 {
 	struct ar5416AniState *aniState;
 	struct ath9k_channel *chan = ah->curchan;
+	struct ath_common *common = ath9k_hw_common(ah);
 	int index;
 
 	if (!DO_ANI(ah))
@@ -475,8 +480,8 @@ void ath9k_ani_reset(struct ath_hw *ah)
 
 	if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION
 	    && ah->opmode != NL80211_IFTYPE_ADHOC) {
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"Reset ANI state opmode %u\n", ah->opmode);
+		ath_print(common, ATH_DBG_ANI,
+			  "Reset ANI state opmode %u\n", ah->opmode);
 		ah->stats.ast_ani_reset++;
 
 		if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -543,6 +548,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
 			  struct ath9k_channel *chan)
 {
 	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
 	int32_t listenTime;
 	u32 phyCnt1, phyCnt2;
 	u32 ofdmPhyErrCnt, cckPhyErrCnt;
@@ -569,20 +575,22 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
 	if (phyCnt1 < aniState->ofdmPhyErrBase ||
 	    phyCnt2 < aniState->cckPhyErrBase) {
 		if (phyCnt1 < aniState->ofdmPhyErrBase) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-				"phyCnt1 0x%x, resetting "
-				"counter value to 0x%x\n",
-				phyCnt1, aniState->ofdmPhyErrBase);
+			ath_print(common, ATH_DBG_ANI,
+				  "phyCnt1 0x%x, resetting "
+				  "counter value to 0x%x\n",
+				  phyCnt1,
+				  aniState->ofdmPhyErrBase);
 			REG_WRITE(ah, AR_PHY_ERR_1,
 				  aniState->ofdmPhyErrBase);
 			REG_WRITE(ah, AR_PHY_ERR_MASK_1,
 				  AR_PHY_ERR_OFDM_TIMING);
 		}
 		if (phyCnt2 < aniState->cckPhyErrBase) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-				"phyCnt2 0x%x, resetting "
-				"counter value to 0x%x\n",
-				phyCnt2, aniState->cckPhyErrBase);
+			ath_print(common, ATH_DBG_ANI,
+				  "phyCnt2 0x%x, resetting "
+				  "counter value to 0x%x\n",
+				  phyCnt2,
+				  aniState->cckPhyErrBase);
 			REG_WRITE(ah, AR_PHY_ERR_2,
 				  aniState->cckPhyErrBase);
 			REG_WRITE(ah, AR_PHY_ERR_MASK_2,
@@ -621,10 +629,13 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
 		}
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_ani_monitor);
 
 void ath9k_enable_mib_counters(struct ath_hw *ah)
 {
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable MIB counters\n");
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	ath_print(common, ATH_DBG_ANI, "Enable MIB counters\n");
 
 	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
 
@@ -640,7 +651,10 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
 /* Freeze the MIB counters, get the stats and then clear them */
 void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
 {
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disable MIB counters\n");
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	ath_print(common, ATH_DBG_ANI, "Disable MIB counters\n");
+
 	REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
 	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
 	REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC);
@@ -653,6 +667,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
 				  u32 *rxf_pcnt,
 				  u32 *txf_pcnt)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	static u32 cycles, rx_clear, rx_frame, tx_frame;
 	u32 good = 1;
 
@@ -662,8 +677,8 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
 	u32 cc = REG_READ(ah, AR_CCCNT);
 
 	if (cycles == 0 || cycles > cc) {
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"cycle counter wrap. ExtBusy = 0\n");
+		ath_print(common, ATH_DBG_ANI,
+			  "cycle counter wrap. ExtBusy = 0\n");
 		good = 0;
 	} else {
 		u32 cc_d = cc - cycles;
@@ -742,6 +757,7 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
 		ath9k_ani_restart(ah);
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_procmibevent);
 
 void ath9k_hw_ani_setup(struct ath_hw *ah)
 {
@@ -762,9 +778,10 @@ void ath9k_hw_ani_setup(struct ath_hw *ah)
 
 void ath9k_hw_ani_init(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int i;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Initialize ANI\n");
+	ath_print(common, ATH_DBG_ANI, "Initialize ANI\n");
 
 	memset(ah->ani, 0, sizeof(ah->ani));
 	for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
@@ -786,11 +803,11 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
 			AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"Setting OfdmErrBase = 0x%08x\n",
-		ah->ani[0].ofdmPhyErrBase);
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
-		ah->ani[0].cckPhyErrBase);
+	ath_print(common, ATH_DBG_ANI,
+		  "Setting OfdmErrBase = 0x%08x\n",
+		  ah->ani[0].ofdmPhyErrBase);
+	ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
+		  ah->ani[0].cckPhyErrBase);
 
 	REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
 	REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
@@ -803,7 +820,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
 
 void ath9k_hw_ani_disable(struct ath_hw *ah)
 {
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling ANI\n");
+	ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
 
 	ath9k_hw_disable_mib_counters(ah);
 	REG_WRITE(ah, AR_PHY_ERR_1, 0);
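
The ani.c hunks above replace the softc-based DPRINTF() calls with ath_print() on the shared struct ath_common returned by ath9k_hw_common(ah), so the hw code no longer reaches through ah->ah_sc. A minimal sketch of the resulting pattern (ath9k_hw_example() is a made-up name, not part of the patch):

	static void ath9k_hw_example(struct ath_hw *ah)
	{
		struct ath_common *common = ath9k_hw_common(ah);

		/* debug output now goes through the bus-agnostic ath_common */
		ath_print(common, ATH_DBG_ANI, "example: opmode %u\n", ah->opmode);
	}
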
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1d59f10f68da..e2cef2ff5d8f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -19,14 +19,15 @@
 
 #include <linux/etherdevice.h>
 #include <linux/device.h>
-#include <net/mac80211.h>
 #include <linux/leds.h>
 
-#include "hw.h"
-#include "rc.h"
 #include "debug.h"
-#include "../ath.h"
-#include "btcoex.h"
+#include "common.h"
+
+/*
+ * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
+ * should rely on this file or its contents.
+ */
 
 struct ath_node;
 
@@ -54,15 +55,11 @@ struct ath_node;
 
 #define A_MAX(a, b) ((a) > (b) ? (a) : (b))
 
-#define ASSERT(exp) BUG_ON(!(exp))
-
 #define TSF_TO_TU(_h,_l) \
 	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
 #define	ATH_TXQ_SETUP(sc, i)        ((sc)->tx.txqsetup & (1<<i))
 
-static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
 struct ath_config {
 	u32 ath_aggr_prot;
 	u16 txpowlimit;
@@ -103,18 +100,6 @@ enum buffer_type {
 	BUF_XRETRY		= BIT(5),
 };
 
-struct ath_buf_state {
-	int bfs_nframes;
-	u16 bfs_al;
-	u16 bfs_frmlen;
-	int bfs_seqno;
-	int bfs_tidno;
-	int bfs_retries;
-	u8 bf_type;
-	u32 bfs_keyix;
-	enum ath9k_key_type bfs_keytype;
-};
-
 #define bf_nframes      	bf_state.bfs_nframes
 #define bf_al           	bf_state.bfs_al
 #define bf_frmlen       	bf_state.bfs_frmlen
@@ -129,21 +114,6 @@ struct ath_buf_state {
 #define bf_isretried(bf)	(bf->bf_state.bf_type & BUF_RETRY)
 #define bf_isxretried(bf)	(bf->bf_state.bf_type & BUF_XRETRY)
 
-struct ath_buf {
-	struct list_head list;
-	struct ath_buf *bf_lastbf;	/* last buf of this unit (a frame or
-					   an aggregate) */
-	struct ath_buf *bf_next;	/* next subframe in the aggregate */
-	struct sk_buff *bf_mpdu;	/* enclosing frame structure */
-	struct ath_desc *bf_desc;	/* virtual addr of desc */
-	dma_addr_t bf_daddr;		/* physical addr of desc */
-	dma_addr_t bf_buf_addr;		/* physical addr of data buffer */
-	bool bf_stale;
-	u16 bf_flags;
-	struct ath_buf_state bf_state;
-	dma_addr_t bf_dmacontext;
-};
-
 struct ath_descdma {
 	struct ath_desc *dd_desc;
 	dma_addr_t dd_desc_paddr;
@@ -163,13 +133,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 
 #define ATH_MAX_ANTENNA         3
 #define ATH_RXBUF               512
-#define WME_NUM_TID             16
 #define ATH_TXBUF               512
 #define ATH_TXMAXTRY            13
 #define ATH_MGT_TXMAXTRY        4
-#define WME_BA_BMP_SIZE         64
-#define WME_MAX_BA              WME_BA_BMP_SIZE
-#define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
 
 #define TID_TO_WME_AC(_tid)				\
 	((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE :	\
@@ -177,12 +143,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 	 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI :	\
 	 WME_AC_VO)
 
-#define WME_AC_BE   0
-#define WME_AC_BK   1
-#define WME_AC_VI   2
-#define WME_AC_VO   3
-#define WME_NUM_AC  4
-
 #define ADDBA_EXCHANGE_ATTEMPTS    10
 #define ATH_AGGR_DELIM_SZ          4
 #define ATH_AGGR_MINPLEN           256 /* in bytes, minimum packet length */
@@ -191,7 +151,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 /* minimum h/w qdepth to be sustained to maximize aggregation */
 #define ATH_AGGR_MIN_QDEPTH        2
 #define ATH_AMPDU_SUBFRAME_DEFAULT 32
-#define ATH_AMPDU_LIMIT_MAX        (64 * 1024 - 1)
 
 #define IEEE80211_SEQ_SEQ_SHIFT    4
 #define IEEE80211_SEQ_MAX          4096
@@ -238,18 +197,8 @@ struct ath_txq {
 	struct list_head axq_q;
 	spinlock_t axq_lock;
 	u32 axq_depth;
-	u8 axq_aggr_depth;
 	bool stopped;
 	bool axq_tx_inprogress;
-	struct ath_buf *axq_linkbuf;
-
-	/* first desc of the last descriptor that contains CTS */
-	struct ath_desc *axq_lastdsWithCTS;
-
-	/* final desc of the gating desc that determines whether
-	   lastdsWithCTS has been DMA'ed or not */
-	struct ath_desc *axq_gatingds;
-
 	struct list_head axq_acq;
 };
 
@@ -257,30 +206,6 @@ struct ath_txq {
 #define AGGR_ADDBA_COMPLETE  BIT(2)
 #define AGGR_ADDBA_PROGRESS  BIT(3)
 
-struct ath_atx_tid {
-	struct list_head list;
-	struct list_head buf_q;
-	struct ath_node *an;
-	struct ath_atx_ac *ac;
-	struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
-	u16 seq_start;
-	u16 seq_next;
-	u16 baw_size;
-	int tidno;
-	int baw_head;	/* first un-acked tx buffer */
-	int baw_tail;	/* next unused tx buffer slot */
-	int sched;
-	int paused;
-	u8 state;
-};
-
-struct ath_atx_ac {
-	int sched;
-	int qnum;
-	struct list_head list;
-	struct list_head tid_q;
-};
-
 struct ath_tx_control {
 	struct ath_txq *txq;
 	int if_id;
@@ -291,30 +216,6 @@ struct ath_tx_control {
 #define ATH_TX_XRETRY       0x02
 #define ATH_TX_BAR          0x04
 
-#define ATH_RSSI_LPF_LEN 		10
-#define RSSI_LPF_THRESHOLD		-20
-#define ATH9K_RSSI_BAD			0x80
-#define ATH_RSSI_EP_MULTIPLIER     (1<<7)
-#define ATH_EP_MUL(x, mul)         ((x) * (mul))
-#define ATH_RSSI_IN(x)             (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
-#define ATH_LPF_RSSI(x, y, len) \
-    ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
-#define ATH_RSSI_LPF(x, y) do {                     			\
-    if ((y) >= RSSI_LPF_THRESHOLD)                         		\
-	x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN);  	\
-} while (0)
-#define ATH_EP_RND(x, mul) 						\
-	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
-
-struct ath_node {
-	struct ath_softc *an_sc;
-	struct ath_atx_tid tid[WME_NUM_TID];
-	struct ath_atx_ac ac[WME_NUM_AC];
-	u16 maxampdu;
-	u8 mpdudensity;
-	int last_rssi;
-};
-
 struct ath_tx {
 	u16 seq_no;
 	u32 txqsetup;
@@ -329,7 +230,6 @@ struct ath_rx {
 	u8 defant;
 	u8 rxotherant;
 	u32 *rxlink;
-	int bufsize;
 	unsigned int rxfilter;
 	spinlock_t rxflushlock;
 	spinlock_t rxbuflock;
@@ -427,9 +327,9 @@ struct ath_beacon {
 
 void ath_beacon_tasklet(unsigned long data);
 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beaconq_setup(struct ath_hw *ah);
 int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
 void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
+int ath_beaconq_config(struct ath_softc *sc);
 
 /*******/
 /* ANI */
@@ -441,14 +341,24 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
 #define ATH_LONG_CALINTERVAL      30000   /* 30 seconds */
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
 
-struct ath_ani {
-	bool caldone;
-	int16_t noise_floor;
-	unsigned int longcal_timer;
-	unsigned int shortcal_timer;
-	unsigned int resetcal_timer;
-	unsigned int checkani_timer;
-	struct timer_list timer;
+/* Defines the BT AR_BT_COEX_WGHT used */
+enum ath_stomp_type {
+	ATH_BTCOEX_NO_STOMP,
+	ATH_BTCOEX_STOMP_ALL,
+	ATH_BTCOEX_STOMP_LOW,
+	ATH_BTCOEX_STOMP_NONE
+};
+
+struct ath_btcoex {
+	bool hw_timer_enabled;
+	spinlock_t btcoex_lock;
+	struct timer_list period_timer; /* Timer for BT period */
+	u32 bt_priority_cnt;
+	unsigned long bt_priority_time;
+	int bt_stomp_type; /* Types of BT stomping */
+	u32 btcoex_no_stomp; /* in usec */
+	u32 btcoex_period; /* in usec */
+	struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
 };
 
 /********************/
@@ -484,25 +394,13 @@ struct ath_led {
  * Used when PCI device not fully initialized by bootrom/BIOS
 */
 #define DEFAULT_CACHELINE       32
-#define	ATH_DEFAULT_NOISE_FLOOR -95
 #define ATH_REGCLASSIDS_MAX     10
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
 #define ATH_MAX_SW_RETRIES      10
 #define ATH_CHAN_MAX            255
 #define IEEE80211_WEP_NKID      4       /* number of key ids */
 
-/*
- * The key cache is used for h/w cipher state and also for
- * tracking station state such as the current tx antenna.
- * We also setup a mapping table between key cache slot indices
- * and station state to short-circuit node lookups on rx.
- * Different parts have different size key caches.  We handle
- * up to ATH_KEYMAX entries (could dynamically allocate state).
- */
-#define	ATH_KEYMAX	        128     /* max key cache size we handle */
-
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
-#define ATH_RSSI_DUMMY_MARKER   0x127
 #define ATH_RATE_DUMMY_MARKER   0
 
 #define SC_OP_INVALID           BIT(0)
@@ -522,23 +420,17 @@ struct ath_led {
 #define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
 #define SC_OP_WAIT_FOR_TX_ACK   BIT(18)
 #define SC_OP_BEACON_SYNC       BIT(19)
-#define SC_OP_BTCOEX_ENABLED    BIT(20)
 #define SC_OP_BT_PRIORITY_DETECTED BIT(21)
-
-struct ath_bus_ops {
-	void		(*read_cachesize)(struct ath_softc *sc, int *csz);
-	void		(*cleanup)(struct ath_softc *sc);
-	bool		(*eeprom_read)(struct ath_hw *ah, u32 off, u16 *data);
-};
+#define SC_OP_NULLFUNC_COMPLETED BIT(22)
+#define SC_OP_PS_ENABLED	BIT(23)
 
 struct ath_wiphy;
+struct ath_rate_table;
 
 struct ath_softc {
 	struct ieee80211_hw *hw;
 	struct device *dev;
 
-	struct ath_common common;
-
 	spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
 	struct ath_wiphy *pri_wiphy;
 	struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
@@ -565,32 +457,21 @@ struct ath_softc {
 	spinlock_t sc_pm_lock;
 	struct mutex mutex;
 
-	u8 curbssid[ETH_ALEN];
-	u8 bssidmask[ETH_ALEN];
 	u32 intrstatus;
 	u32 sc_flags; /* SC_OP_* */
 	u16 curtxpow;
-	u16 curaid;
 	u8 nbcnvifs;
 	u16 nvifs;
-	u8 tx_chainmask;
-	u8 rx_chainmask;
-	u32 keymax;
-	DECLARE_BITMAP(keymap, ATH_KEYMAX);
-	u8 splitmic;
 	bool ps_enabled;
 	unsigned long ps_usecount;
 	enum ath9k_int imask;
-	enum ath9k_ht_extprotspacing ht_extprotspacing;
-	enum ath9k_ht_macmode tx_chan_width;
 
 	struct ath_config config;
 	struct ath_rx rx;
 	struct ath_tx tx;
 	struct ath_beacon beacon;
-	struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
-	const struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
 	const struct ath_rate_table *cur_rate_table;
+	enum wireless_mode cur_rate_mode;
 	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 
 	struct ath_led radio_led;
@@ -605,14 +486,12 @@ struct ath_softc {
 
 	int beacon_interval;
 
-	struct ath_ani ani;
-#ifdef CONFIG_ATH9K_DEBUG
+#ifdef CONFIG_ATH9K_DEBUGFS
 	struct ath9k_debug debug;
 #endif
-	struct ath_bus_ops *bus_ops;
 	struct ath_beacon_config cur_beacon_conf;
 	struct delayed_work tx_complete_work;
-	struct ath_btcoex_info btcoex_info;
+	struct ath_btcoex btcoex;
 };
 
 struct ath_wiphy {
@@ -625,6 +504,7 @@ struct ath_wiphy {
 		ATH_WIPHY_PAUSED,
 		ATH_WIPHY_SCAN,
 	} state;
+	bool idle;
 	int chan_idx;
 	int chan_is_ht;
 };
@@ -634,31 +514,22 @@ int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
 int ath_cabq_update(struct ath_softc *);
 
-static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
-{
-	return &ah->ah_sc->common;
-}
-
-static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 {
-	return &(ath9k_hw_common(ah)->regulatory);
+	common->bus_ops->read_cachesize(common, csz);
 }
 
-static inline void ath_read_cachesize(struct ath_softc *sc, int *csz)
+static inline void ath_bus_cleanup(struct ath_common *common)
 {
-	sc->bus_ops->read_cachesize(sc, csz);
-}
-
-static inline void ath_bus_cleanup(struct ath_softc *sc)
-{
-	sc->bus_ops->cleanup(sc);
+	common->bus_ops->cleanup(common);
 }
 
 extern struct ieee80211_ops ath9k_ops;
 
 irqreturn_t ath_isr(int irq, void *dev);
 void ath_cleanup(struct ath_softc *sc);
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid);
+int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+		    const struct ath_bus_ops *bus_ops);
 void ath_detach(struct ath_softc *sc);
 const char *ath_mac_bb_name(u32 mac_bb_version);
 const char *ath_rf_name(u16 rf_version);
@@ -668,8 +539,9 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
 void ath_update_chainmask(struct ath_softc *sc, int is_ht);
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		    struct ath9k_channel *hchan);
-void ath_radio_enable(struct ath_softc *sc);
-void ath_radio_disable(struct ath_softc *sc);
+
+void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
 
 #ifdef CONFIG_PCI
 int ath_pci_init(void);
@@ -705,9 +577,10 @@ void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
 bool ath9k_wiphy_scanning(struct ath_softc *sc);
 void ath9k_wiphy_work(struct work_struct *work);
 bool ath9k_all_wiphys_idle(struct ath_softc *sc);
+void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
 
-void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val);
-unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset);
+void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
+void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
 
 int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
 #endif /* ATH9K_H */
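
With struct ath_bus_ops gone from ath9k.h, the inline wrappers above dispatch through common->bus_ops, and ath_init_device() now takes the ops table as an argument. A sketch of what a bus backend supplies under the new signatures (callback and variable names are assumptions, error handling elided):

	static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
	{
		/* read the PCI cache line size; fall back to DEFAULT_CACHELINE */
		*csz = DEFAULT_CACHELINE >> 2;
	}

	static const struct ath_bus_ops ath_pci_bus_ops = {
		.read_cachesize = ath_pci_read_cachesize,
		/* .cleanup and .eeprom_read omitted for brevity */
	};

	/* and, from the bus probe routine: */
	error = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
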
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 45c4ea57616b..1660ef17aaf5 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -23,10 +23,12 @@
  *  the operating mode of the station (AP or AdHoc).  Parameters are AIFS
  *  settings and channel width min/max
 */
-static int ath_beaconq_config(struct ath_softc *sc)
+int ath_beaconq_config(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
-	struct ath9k_tx_queue_info qi;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_tx_queue_info qi, qi_be;
+	int qnum;
 
 	ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
 	if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
@@ -36,14 +38,17 @@ static int ath_beaconq_config(struct ath_softc *sc)
 		qi.tqi_cwmax = 0;
 	} else {
 		/* Adhoc mode; important thing is to use 2x cwmin. */
-		qi.tqi_aifs = sc->beacon.beacon_qi.tqi_aifs;
-		qi.tqi_cwmin = 2*sc->beacon.beacon_qi.tqi_cwmin;
-		qi.tqi_cwmax = sc->beacon.beacon_qi.tqi_cwmax;
+		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA,
+				       ATH9K_WME_AC_BE);
+		ath9k_hw_get_txq_props(ah, qnum, &qi_be);
+		qi.tqi_aifs = qi_be.tqi_aifs;
+		qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+		qi.tqi_cwmax = qi_be.tqi_cwmax;
 	}
 
 	if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to update h/w beacon queue parameters\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to update h/w beacon queue parameters\n");
 		return 0;
 	} else {
 		ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
@@ -61,11 +66,12 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
 {
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
 	struct ath9k_11n_rate_series series[4];
-	const struct ath_rate_table *rt;
 	int flags, antenna, ctsrate = 0, ctsduration = 0;
-	u8 rate;
+	struct ieee80211_supported_band *sband;
+	u8 rate = 0;
 
 	ds = bf->bf_desc;
 	flags = ATH9K_TXDESC_NOACK;
@@ -89,10 +95,10 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
 
 	ds->ds_data = bf->bf_buf_addr;
 
-	rt = sc->cur_rate_table;
-	rate = rt->info[0].ratecode;
+	sband = &sc->sbands[common->hw->conf.channel->band];
+	rate = sband->bitrates[0].hw_value;
 	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
-		rate |= rt->info[0].short_preamble;
+		rate |= sband->bitrates[0].hw_value_short;
 
 	ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN,
 			       ATH9K_PKT_TYPE_BEACON,
@@ -108,7 +114,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
 	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
 	series[0].Tries = 1;
 	series[0].Rate = rate;
-	series[0].ChSel = sc->tx_chainmask;
+	series[0].ChSel = common->tx_chainmask;
 	series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
 	ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration,
 				     series, 4, 0);
@@ -119,6 +125,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_buf *bf;
 	struct ath_vif *avp;
 	struct sk_buff *skb;
@@ -172,7 +179,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
 	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
 		dev_kfree_skb_any(skb);
 		bf->bf_mpdu = NULL;
-		DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error on beaconing\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "dma_mapping_error on beaconing\n");
 		return NULL;
 	}
 
@@ -192,8 +200,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
 
 	if (skb && cabq_depth) {
 		if (sc->nvifs > 1) {
-			DPRINTF(sc, ATH_DBG_BEACON,
-				"Flushing previous cabq traffic\n");
+			ath_print(common, ATH_DBG_BEACON,
+				  "Flushing previous cabq traffic\n");
 			ath_draintxq(sc, cabq, false);
 		}
 	}
@@ -216,6 +224,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
 				   struct ieee80211_vif *vif)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_buf *bf;
 	struct ath_vif *avp;
 	struct sk_buff *skb;
@@ -233,25 +242,14 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
 	/* NB: caller is known to have already stopped tx dma */
 	ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
 	ath9k_hw_txstart(ah, sc->beacon.beaconq);
-	DPRINTF(sc, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
-		sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
-}
-
-int ath_beaconq_setup(struct ath_hw *ah)
-{
-	struct ath9k_tx_queue_info qi;
-
-	memset(&qi, 0, sizeof(qi));
-	qi.tqi_aifs = 1;
-	qi.tqi_cwmin = 0;
-	qi.tqi_cwmax = 0;
-	/* NB: don't enable any interrupts */
-	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
+	ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
+		  sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
 }
 
 int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
 {
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_vif *avp;
 	struct ath_buf *bf;
 	struct sk_buff *skb;
@@ -309,7 +307,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
 	/* NB: the beacon data buffer must be 32-bit aligned. */
 	skb = ieee80211_beacon_get(sc->hw, vif);
 	if (skb == NULL) {
-		DPRINTF(sc, ATH_DBG_BEACON, "cannot get skb\n");
+		ath_print(common, ATH_DBG_BEACON, "cannot get skb\n");
 		return -ENOMEM;
 	}
 
@@ -333,9 +331,10 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
 		tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
 		avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
 
-		DPRINTF(sc, ATH_DBG_BEACON,
-			"stagger beacons, bslot %d intval %u tsfadjust %llu\n",
-			avp->av_bslot, intval, (unsigned long long)tsfadjust);
+		ath_print(common, ATH_DBG_BEACON,
+			  "stagger beacons, bslot %d intval "
+			  "%u tsfadjust %llu\n",
+			  avp->av_bslot, intval, (unsigned long long)tsfadjust);
 
 		((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
 			avp->tsf_adjust;
@@ -349,8 +348,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
 	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
 		dev_kfree_skb_any(skb);
 		bf->bf_mpdu = NULL;
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"dma_mapping_error on beacon alloc\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "dma_mapping_error on beacon alloc\n");
 		return -ENOMEM;
 	}
 
@@ -386,6 +385,7 @@ void ath_beacon_tasklet(unsigned long data)
 {
 	struct ath_softc *sc = (struct ath_softc *)data;
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_buf *bf = NULL;
 	struct ieee80211_vif *vif;
 	struct ath_wiphy *aphy;
@@ -405,12 +405,12 @@ void ath_beacon_tasklet(unsigned long data)
 		sc->beacon.bmisscnt++;
 
 		if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
-			DPRINTF(sc, ATH_DBG_BEACON,
-				"missed %u consecutive beacons\n",
-				sc->beacon.bmisscnt);
+			ath_print(common, ATH_DBG_BEACON,
+				  "missed %u consecutive beacons\n",
+				  sc->beacon.bmisscnt);
 		} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
-			DPRINTF(sc, ATH_DBG_BEACON,
-				"beacon is officially stuck\n");
+			ath_print(common, ATH_DBG_BEACON,
+				  "beacon is officially stuck\n");
 			sc->sc_flags |= SC_OP_TSF_RESET;
 			ath_reset(sc, false);
 		}
@@ -419,9 +419,9 @@ void ath_beacon_tasklet(unsigned long data)
 	}
 
 	if (sc->beacon.bmisscnt != 0) {
-		DPRINTF(sc, ATH_DBG_BEACON,
-			"resume beacon xmit after %u misses\n",
-			sc->beacon.bmisscnt);
+		ath_print(common, ATH_DBG_BEACON,
+			  "resume beacon xmit after %u misses\n",
+			  sc->beacon.bmisscnt);
 		sc->beacon.bmisscnt = 0;
 	}
 
@@ -447,9 +447,9 @@ void ath_beacon_tasklet(unsigned long data)
 	vif = sc->beacon.bslot[slot];
 	aphy = sc->beacon.bslot_aphy[slot];
 
-	DPRINTF(sc, ATH_DBG_BEACON,
-		"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
-		slot, tsf, tsftu, intval, vif);
+	ath_print(common, ATH_DBG_BEACON,
+		  "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
+		  slot, tsf, tsftu, intval, vif);
 
 	bfaddr = 0;
 	if (vif) {
@@ -490,7 +490,7 @@ void ath_beacon_tasklet(unsigned long data)
 		 * are still pending on the queue.
 		 */
 		if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
-			DPRINTF(sc, ATH_DBG_FATAL,
+			ath_print(common, ATH_DBG_FATAL,
 				"beacon queue %u did not stop?\n", sc->beacon.beaconq);
 		}
 
@@ -502,6 +502,19 @@ void ath_beacon_tasklet(unsigned long data)
 	}
 }
 
+static void ath9k_beacon_init(struct ath_softc *sc,
+			      u32 next_beacon,
+			      u32 beacon_period)
+{
+	if (beacon_period & ATH9K_BEACON_RESET_TSF)
+		ath9k_ps_wakeup(sc);
+
+	ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
+
+	if (beacon_period & ATH9K_BEACON_RESET_TSF)
+		ath9k_ps_restore(sc);
+}
+
 /*
  * For multi-bss ap support beacons are either staggered evenly over N slots or
  * burst together.  For the former arrange for the SWBA to be delivered for each
@@ -534,7 +547,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
 	/* Set the computed AP beacon timers */
 
 	ath9k_hw_set_interrupts(sc->sc_ah, 0);
-	ath9k_hw_beaconinit(sc->sc_ah, nexttbtt, intval);
+	ath9k_beacon_init(sc, nexttbtt, intval);
 	sc->beacon.bmisscnt = 0;
 	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
 
@@ -555,6 +568,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
 static void ath_beacon_config_sta(struct ath_softc *sc,
 				  struct ath_beacon_config *conf)
 {
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath9k_beacon_state bs;
 	int dtimperiod, dtimcount, sleepduration;
 	int cfpperiod, cfpcount;
@@ -651,11 +665,11 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
 	/* TSF out of range threshold fixed at 1 second */
 	bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
 
-	DPRINTF(sc, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
-	DPRINTF(sc, ATH_DBG_BEACON,
-		"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
-		bs.bs_bmissthreshold, bs.bs_sleepduration,
-		bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+	ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+	ath_print(common, ATH_DBG_BEACON,
+		  "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+		  bs.bs_bmissthreshold, bs.bs_sleepduration,
+		  bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
 
 	/* Set the computed STA beacon timers */
 
@@ -669,6 +683,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
 				    struct ath_beacon_config *conf,
 				    struct ieee80211_vif *vif)
 {
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	u64 tsf;
 	u32 tsftu, intval, nexttbtt;
 
@@ -689,9 +704,9 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
 		nexttbtt += intval;
 	} while (nexttbtt < tsftu);
 
-	DPRINTF(sc, ATH_DBG_BEACON,
-		"IBSS nexttbtt %u intval %u (%u)\n",
-		nexttbtt, intval, conf->beacon_interval);
+	ath_print(common, ATH_DBG_BEACON,
+		  "IBSS nexttbtt %u intval %u (%u)\n",
+		  nexttbtt, intval, conf->beacon_interval);
 
 	/*
 	 * In IBSS mode enable the beacon timers but only enable SWBA interrupts
@@ -707,7 +722,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
 	/* Set the computed ADHOC beacon timers */
 
 	ath9k_hw_set_interrupts(sc->sc_ah, 0);
-	ath9k_hw_beaconinit(sc->sc_ah, nexttbtt, intval);
+	ath9k_beacon_init(sc, nexttbtt, intval);
 	sc->beacon.bmisscnt = 0;
 	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
 
@@ -719,6 +734,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
 {
 	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	enum nl80211_iftype iftype;
 
 	/* Setup the beacon configuration parameters */
@@ -759,8 +775,8 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
 		ath_beacon_config_sta(sc, cur_conf);
 		break;
 	default:
-		DPRINTF(sc, ATH_DBG_CONFIG,
-			"Unsupported beaconing mode\n");
+		ath_print(common, ATH_DBG_CONFIG,
+			  "Unsupported beaconing mode\n");
 		return;
 	}
 
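
The new ath9k_beacon_init() wrapper above treats ATH9K_BEACON_RESET_TSF as a flag OR'ed into beacon_period and brackets ath9k_hw_beaconinit() with a power-save wakeup/restore pair when it is set, so a TSF reset is never issued while the chip sleeps. An illustrative caller (a sketch only; the surrounding logic is assumed):

	if (sc->sc_flags & SC_OP_TSF_RESET)
		ath9k_beacon_init(sc, nexttbtt, intval | ATH9K_BEACON_RESET_TSF);
	else
		ath9k_beacon_init(sc, nexttbtt, intval);
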
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 55f607b7699e..fb4ac15f3b93 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -14,10 +14,26 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
-static const struct ath_btcoex_config ath_bt_config = { 0, true, true,
-			ATH_BT_COEX_MODE_SLOTTED, true, true, 2, 5, true };
+enum ath_bt_mode {
+	ATH_BT_COEX_MODE_LEGACY,        /* legacy rx_clear mode */
+	ATH_BT_COEX_MODE_UNSLOTTED,     /* untimed/unslotted mode */
+	ATH_BT_COEX_MODE_SLOTTED,       /* slotted mode */
+	ATH_BT_COEX_MODE_DISALBED,      /* coexistence disabled */
+};
+
+struct ath_btcoex_config {
+	u8 bt_time_extend;
+	bool bt_txstate_extend;
+	bool bt_txframe_extend;
+	enum ath_bt_mode bt_mode; /* coexistence mode */
+	bool bt_quiet_collision;
+	bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
+	u8 bt_priority_time;
+	u8 bt_first_slot_time;
+	bool bt_hold_rx_clear;
+};
 
 static const u16 ath_subsysid_tbl[] = {
 	AR9280_COEX2WIRE_SUBSYSID,
@@ -29,141 +45,38 @@ static const u16 ath_subsysid_tbl[] = {
  * Checks the subsystem id of the device to see if it
  * supports btcoex
  */
-bool ath_btcoex_supported(u16 subsysid)
+bool ath9k_hw_btcoex_supported(struct ath_hw *ah)
 {
 	int i;
 
-	if (!subsysid)
+	if (!ah->hw_version.subsysid)
 		return false;
 
 	for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++)
-		if (subsysid == ath_subsysid_tbl[i])
+		if (ah->hw_version.subsysid == ath_subsysid_tbl[i])
 			return true;
 
 	return false;
 }
 
-/*
- * Detects if there is any priority bt traffic
- */
-static void ath_detect_bt_priority(struct ath_softc *sc)
-{
-	struct ath_btcoex_info *btinfo = &sc->btcoex_info;
-
-	if (ath9k_hw_gpio_get(sc->sc_ah, btinfo->btpriority_gpio))
-		btinfo->bt_priority_cnt++;
-
-	if (time_after(jiffies, btinfo->bt_priority_time +
-			msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
-		if (btinfo->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
-			DPRINTF(sc, ATH_DBG_BTCOEX,
-				"BT priority traffic detected");
-			sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
-		} else {
-			sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
-		}
-
-		btinfo->bt_priority_cnt = 0;
-		btinfo->bt_priority_time = jiffies;
-	}
-}
-
-/*
- * Configures appropriate weight based on stomp type.
- */
-static void ath_btcoex_bt_stomp(struct ath_softc *sc,
-				struct ath_btcoex_info *btinfo,
-				int stomp_type)
-{
-
-	switch (stomp_type) {
-	case ATH_BTCOEX_STOMP_ALL:
-		ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
-				      AR_STOMP_ALL_WLAN_WGHT);
-		break;
-	case ATH_BTCOEX_STOMP_LOW:
-		ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
-				      AR_STOMP_LOW_WLAN_WGHT);
-		break;
-	case ATH_BTCOEX_STOMP_NONE:
-		ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
-				      AR_STOMP_NONE_WLAN_WGHT);
-		break;
-	default:
-		DPRINTF(sc, ATH_DBG_BTCOEX, "Invalid Stomptype\n");
-		break;
-	}
-
-	ath9k_hw_btcoex_enable(sc->sc_ah);
-}
-
-/*
- * This is the master bt coex timer which runs for every
- * 45ms, bt traffic will be given priority during 55% of this
- * period while wlan gets remaining 45%
- */
-
-static void ath_btcoex_period_timer(unsigned long data)
-{
-	struct ath_softc *sc = (struct ath_softc *) data;
-	struct ath_btcoex_info *btinfo = &sc->btcoex_info;
-
-	ath_detect_bt_priority(sc);
-
-	spin_lock_bh(&btinfo->btcoex_lock);
-
-	ath_btcoex_bt_stomp(sc, btinfo, btinfo->bt_stomp_type);
-
-	spin_unlock_bh(&btinfo->btcoex_lock);
-
-	if (btinfo->btcoex_period != btinfo->btcoex_no_stomp) {
-		if (btinfo->hw_timer_enabled)
-			ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
-
-		ath_gen_timer_start(sc->sc_ah,
-			btinfo->no_stomp_timer,
-			(ath9k_hw_gettsf32(sc->sc_ah) +
-				btinfo->btcoex_no_stomp),
-				btinfo->btcoex_no_stomp * 10);
-		btinfo->hw_timer_enabled = true;
-	}
-
-	mod_timer(&btinfo->period_timer, jiffies +
-				  msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
-}
-
-/*
- * Generic tsf based hw timer which configures weight
- * registers to time slice between wlan and bt traffic
- */
-
-static void ath_btcoex_no_stomp_timer(void *arg)
-{
-	struct ath_softc *sc = (struct ath_softc *)arg;
-	struct ath_btcoex_info *btinfo = &sc->btcoex_info;
-
-	DPRINTF(sc, ATH_DBG_BTCOEX, "no stomp timer running \n");
-
-	spin_lock_bh(&btinfo->btcoex_lock);
-
-	if (btinfo->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
-		ath_btcoex_bt_stomp(sc, btinfo, ATH_BTCOEX_STOMP_NONE);
-	 else if (btinfo->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
-		ath_btcoex_bt_stomp(sc, btinfo, ATH_BTCOEX_STOMP_LOW);
-
-	spin_unlock_bh(&btinfo->btcoex_lock);
-}
-
-static int ath_init_btcoex_info(struct ath_hw *hw,
-				struct ath_btcoex_info *btcoex_info)
+void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 {
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	const struct ath_btcoex_config ath_bt_config = {
+		.bt_time_extend = 0,
+		.bt_txstate_extend = true,
+		.bt_txframe_extend = true,
+		.bt_mode = ATH_BT_COEX_MODE_SLOTTED,
+		.bt_quiet_collision = true,
+		.bt_rxclear_polarity = true,
+		.bt_priority_time = 2,
+		.bt_first_slot_time = 5,
+		.bt_hold_rx_clear = true,
+	};
 	u32 i;
-	int qnum;
 
-	qnum = ath_tx_get_qnum(hw->ah_sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
-
-	btcoex_info->bt_coex_mode =
-		(btcoex_info->bt_coex_mode & AR_BT_QCU_THRESH) |
+	btcoex_hw->bt_coex_mode =
+		(btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
 		SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
 		SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
 		SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
@@ -174,167 +87,141 @@ static int ath_init_btcoex_info(struct ath_hw *hw,
 		SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
 		SM(qnum, AR_BT_QCU_THRESH);
 
-	btcoex_info->bt_coex_mode2 =
+	btcoex_hw->bt_coex_mode2 =
 		SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
 		SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
 		AR_BT_DISABLE_BT_ANT;
 
-	btcoex_info->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+	for (i = 0; i < 32; i++)
+		ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
+}
+EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
 
-	btcoex_info->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
+{
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	btcoex_info->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
-		btcoex_info->btcoex_period / 100;
+	/* connect bt_active to baseband */
+	REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+		    (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
+		     AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
 
-	for (i = 0; i < 32; i++)
-		hw->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
+	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+		    AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
 
-	setup_timer(&btcoex_info->period_timer, ath_btcoex_period_timer,
-			(unsigned long) hw->ah_sc);
+	/* Set input mux for bt_active to gpio pin */
+	REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+		      AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+		      btcoex_hw->btactive_gpio);
 
-	btcoex_info->no_stomp_timer = ath_gen_timer_alloc(hw,
-			ath_btcoex_no_stomp_timer,
-			ath_btcoex_no_stomp_timer,
-			(void *)hw->ah_sc, AR_FIRST_NDP_TIMER);
+	/* Configure the desired gpio port for input */
+	ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
+
+void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
+{
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	if (btcoex_info->no_stomp_timer == NULL)
-		return -ENOMEM;
+	/* btcoex 3-wire */
+	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+			(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
+			 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
 
-	spin_lock_init(&btcoex_info->btcoex_lock);
+	/* Set input mux for bt_prority_async and
+	 *                  bt_active_async to GPIO pins */
+	REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+			AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+			btcoex_hw->btactive_gpio);
 
-	return 0;
+	REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+			AR_GPIO_INPUT_MUX1_BT_PRIORITY,
+			btcoex_hw->btpriority_gpio);
+
+	/* Configure the desired GPIO ports for input */
+
+	ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+	ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
 }
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
 
-int ath9k_hw_btcoex_init(struct ath_hw *ah)
+static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
 {
-	struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info;
-	int ret = 0;
-
-	if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_2WIRE) {
-		/* connect bt_active to baseband */
-		REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
-				(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
-				 AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
-
-		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
-				AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
-
-		/* Set input mux for bt_active to gpio pin */
-		REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-				AR_GPIO_INPUT_MUX1_BT_ACTIVE,
-				btcoex_info->btactive_gpio);
-
-		/* Configure the desired gpio port for input */
-		ath9k_hw_cfg_gpio_input(ah, btcoex_info->btactive_gpio);
-	} else {
-		/* btcoex 3-wire */
-		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
-				(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
-				 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
-
-		/* Set input mux for bt_prority_async and
-		 *                  bt_active_async to GPIO pins */
-		REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-				AR_GPIO_INPUT_MUX1_BT_ACTIVE,
-				btcoex_info->btactive_gpio);
-
-		REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-				AR_GPIO_INPUT_MUX1_BT_PRIORITY,
-				btcoex_info->btpriority_gpio);
-
-		/* Configure the desired GPIO ports for input */
-
-		ath9k_hw_cfg_gpio_input(ah, btcoex_info->btactive_gpio);
-		ath9k_hw_cfg_gpio_input(ah, btcoex_info->btpriority_gpio);
-
-		ret = ath_init_btcoex_info(ah, btcoex_info);
-	}
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	return ret;
+	/* Configure the desired GPIO port for TX_FRAME output */
+	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+			    AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
 }
 
-void ath9k_hw_btcoex_enable(struct ath_hw *ah)
+void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
+				u32 bt_weight,
+				u32 wlan_weight)
 {
-	struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info;
-
-	if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_2WIRE) {
-		/* Configure the desired GPIO port for TX_FRAME output */
-		ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
-				AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
-	} else {
-		/*
-		 * Program coex mode and weight registers to
-		 * enable coex 3-wire
-		 */
-		REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_info->bt_coex_mode);
-		REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_info->bt_coex_weights);
-		REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_info->bt_coex_mode2);
-
-		REG_RMW_FIELD(ah, AR_QUIET1,
-				AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
-		REG_RMW_FIELD(ah, AR_PCU_MISC,
-				AR_PCU_BT_ANT_PREVENT_RX, 0);
-
-		ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
-				AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
-	}
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	REG_RMW(ah, AR_GPIO_PDPU,
-		(0x2 << (btcoex_info->btactive_gpio * 2)),
-		(0x3 << (btcoex_info->btactive_gpio * 2)));
-
-	ah->ah_sc->sc_flags |= SC_OP_BTCOEX_ENABLED;
+	btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
+				     SM(wlan_weight, AR_BTCOEX_WL_WGHT);
 }
+EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
-void ath9k_hw_btcoex_disable(struct ath_hw *ah)
+static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
 {
-	struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info;
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	ath9k_hw_set_gpio(ah, btcoex_info->wlanactive_gpio, 0);
+	/*
+	 * Program coex mode and weight registers to
+	 * enable coex 3-wire
+	 */
+	REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
+	REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
+	REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
 
-	ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
-			AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
 
-	if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) {
-		REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
-		REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
-		REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
-	}
-
-	ah->ah_sc->sc_flags &= ~SC_OP_BTCOEX_ENABLED;
+	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+			    AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
 }
 
-/*
- * Pause btcoex timer and bt duty cycle timer
- */
-void ath_btcoex_timer_pause(struct ath_softc *sc,
-			    struct ath_btcoex_info *btinfo)
+void ath9k_hw_btcoex_enable(struct ath_hw *ah)
 {
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	del_timer_sync(&btinfo->period_timer);
+	switch (btcoex_hw->scheme) {
+	case ATH_BTCOEX_CFG_NONE:
+		break;
+	case ATH_BTCOEX_CFG_2WIRE:
+		ath9k_hw_btcoex_enable_2wire(ah);
+		break;
+	case ATH_BTCOEX_CFG_3WIRE:
+		ath9k_hw_btcoex_enable_3wire(ah);
+		break;
+	}
 
-	if (btinfo->hw_timer_enabled)
-		ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
+	REG_RMW(ah, AR_GPIO_PDPU,
+		(0x2 << (btcoex_hw->btactive_gpio * 2)),
+		(0x3 << (btcoex_hw->btactive_gpio * 2)));
 
-	btinfo->hw_timer_enabled = false;
+	ah->btcoex_hw.enabled = true;
 }
+EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
 
-/*
- * (Re)start btcoex timers
- */
-void ath_btcoex_timer_resume(struct ath_softc *sc,
-			     struct ath_btcoex_info *btinfo)
+void ath9k_hw_btcoex_disable(struct ath_hw *ah)
 {
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	DPRINTF(sc, ATH_DBG_BTCOEX, "Starting btcoex timers");
+	ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
-	/* make sure duty cycle timer is also stopped when resuming */
-	if (btinfo->hw_timer_enabled)
-		ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
+	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+			AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 
-	btinfo->bt_priority_cnt = 0;
-	btinfo->bt_priority_time = jiffies;
-	sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
+	if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
+		REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
+		REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
+		REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
+	}
 
-	mod_timer(&btinfo->period_timer, jiffies);
+	ah->btcoex_hw.enabled = false;
 }
+EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
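
After this rewrite btcoex.c contains only register-level helpers selected by ah->btcoex_hw.scheme; the period/duty-cycle timers and stomp logic move to the driver side (struct ath_btcoex in ath9k.h above). A sketch of the enable sequence the new API implies, reusing the weight constants from the removed ath_btcoex_bt_stomp() (the exact call site and ordering are assumptions):

	if (ath9k_hw_btcoex_supported(ah) &&
	    ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
		ath9k_hw_btcoex_init_3wire(ah);
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
		ath9k_hw_btcoex_enable(ah);
	}
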
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 297b027fd3c3..1ba31a73317c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -17,6 +17,8 @@
 #ifndef BTCOEX_H
 #define BTCOEX_H
 
+#include "hw.h"
+
 #define ATH_WLANACTIVE_GPIO	5
 #define ATH_BTACTIVE_GPIO	6
 #define ATH_BTPRIORITY_GPIO	7
@@ -34,67 +36,25 @@ enum ath_btcoex_scheme {
 	ATH_BTCOEX_CFG_3WIRE,
 };
 
-enum ath_stomp_type {
-	ATH_BTCOEX_NO_STOMP,
-	ATH_BTCOEX_STOMP_ALL,
-	ATH_BTCOEX_STOMP_LOW,
-	ATH_BTCOEX_STOMP_NONE
-};
-
-enum ath_bt_mode {
-	ATH_BT_COEX_MODE_LEGACY,	/* legacy rx_clear mode */
-	ATH_BT_COEX_MODE_UNSLOTTED,	/* untimed/unslotted mode */
-	ATH_BT_COEX_MODE_SLOTTED,	/* slotted mode */
-	ATH_BT_COEX_MODE_DISALBED,	/* coexistence disabled */
-};
-
-struct ath_btcoex_config {
-	u8 bt_time_extend;
-	bool bt_txstate_extend;
-	bool bt_txframe_extend;
-	enum ath_bt_mode bt_mode; /* coexistence mode */
-	bool bt_quiet_collision;
-	bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
-	u8 bt_priority_time;
-	u8 bt_first_slot_time;
-	bool bt_hold_rx_clear;
-};
-
-struct ath_btcoex_info {
-	enum ath_btcoex_scheme btcoex_scheme;
+struct ath_btcoex_hw {
+	enum ath_btcoex_scheme scheme;
+	bool enabled;
 	u8 wlanactive_gpio;
 	u8 btactive_gpio;
 	u8 btpriority_gpio;
-	u8 bt_duty_cycle; 	/* BT duty cycle in percentage */
-	int bt_stomp_type; 	/* Types of BT stomping */
 	u32 bt_coex_mode; 	/* Register setting for AR_BT_COEX_MODE */
 	u32 bt_coex_weights; 	/* Register setting for AR_BT_COEX_WEIGHT */
 	u32 bt_coex_mode2; 	/* Register setting for AR_BT_COEX_MODE2 */
-	u32 btcoex_no_stomp;   /* in usec */
-	u32 btcoex_period;     	/* in usec */
-	u32 bt_priority_cnt;
-	unsigned long bt_priority_time;
-	bool hw_timer_enabled;
-	spinlock_t btcoex_lock;
-	struct timer_list period_timer;      /* Timer for BT period */
-	struct ath_gen_timer *no_stomp_timer; /*Timer for no BT stomping*/
 };
 
-bool ath_btcoex_supported(u16 subsysid);
-int ath9k_hw_btcoex_init(struct ath_hw *ah);
+bool ath9k_hw_btcoex_supported(struct ath_hw *ah);
+void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
+void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
+				u32 bt_weight,
+				u32 wlan_weight);
 void ath9k_hw_btcoex_enable(struct ath_hw *ah);
 void ath9k_hw_btcoex_disable(struct ath_hw *ah);
-void ath_btcoex_timer_resume(struct ath_softc *sc,
-			     struct ath_btcoex_info *btinfo);
-void ath_btcoex_timer_pause(struct ath_softc *sc,
-			    struct ath_btcoex_info *btinfo);
-
-static inline void ath_btcoex_set_weight(struct ath_btcoex_info *btcoex_info,
-					 u32 bt_weight,
-					 u32 wlan_weight)
-{
-	btcoex_info->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
-				       SM(wlan_weight, AR_BTCOEX_WL_WGHT);
-}
 
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 0ad6d0b76e9e..238a5744d8e9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 /* We can tune this as we go by monitoring really low values */
 #define ATH9K_NF_TOO_LOW	-60
@@ -26,11 +26,11 @@
 static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf)
 {
 	if (nf > ATH9K_NF_TOO_LOW) {
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"noise floor value detected (%d) is "
-			"lower than what we think is a "
-			"reasonable value (%d)\n",
-			nf, ATH9K_NF_TOO_LOW);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+			  "noise floor value detected (%d) is "
+			  "lower than what we think is a "
+			  "reasonable value (%d)\n",
+			  nf, ATH9K_NF_TOO_LOW);
 		return false;
 	}
 	return true;
@@ -89,6 +89,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
 static void ath9k_hw_do_getnf(struct ath_hw *ah,
 			      int16_t nfarray[NUM_NF_READINGS])
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int16_t nf;
 
 	if (AR_SREV_9280_10_OR_LATER(ah))
@@ -98,8 +99,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
 
 	if (nf & 0x100)
 		nf = 0 - ((nf ^ 0x1ff) + 1);
-	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-		"NF calibrated [ctl] [chain 0] is %d\n", nf);
+	ath_print(common, ATH_DBG_CALIBRATE,
+		  "NF calibrated [ctl] [chain 0] is %d\n", nf);
 	nfarray[0] = nf;
 
 	if (!AR_SREV_9285(ah)) {
@@ -112,8 +113,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
 
 		if (nf & 0x100)
 			nf = 0 - ((nf ^ 0x1ff) + 1);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"NF calibrated [ctl] [chain 1] is %d\n", nf);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "NF calibrated [ctl] [chain 1] is %d\n", nf);
 		nfarray[1] = nf;
 
 		if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
@@ -121,8 +122,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
 					AR_PHY_CH2_MINCCA_PWR);
 			if (nf & 0x100)
 				nf = 0 - ((nf ^ 0x1ff) + 1);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"NF calibrated [ctl] [chain 2] is %d\n", nf);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "NF calibrated [ctl] [chain 2] is %d\n", nf);
 			nfarray[2] = nf;
 		}
 	}
@@ -136,8 +137,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
 
 	if (nf & 0x100)
 		nf = 0 - ((nf ^ 0x1ff) + 1);
-	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-		"NF calibrated [ext] [chain 0] is %d\n", nf);
+	ath_print(common, ATH_DBG_CALIBRATE,
+		  "NF calibrated [ext] [chain 0] is %d\n", nf);
 	nfarray[3] = nf;
 
 	if (!AR_SREV_9285(ah)) {
@@ -150,8 +151,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
 
 		if (nf & 0x100)
 			nf = 0 - ((nf ^ 0x1ff) + 1);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"NF calibrated [ext] [chain 1] is %d\n", nf);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "NF calibrated [ext] [chain 1] is %d\n", nf);
 		nfarray[4] = nf;
 
 		if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
@@ -159,8 +160,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
 					AR_PHY_CH2_EXT_MINCCA_PWR);
 			if (nf & 0x100)
 				nf = 0 - ((nf ^ 0x1ff) + 1);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"NF calibrated [ext] [chain 2] is %d\n", nf);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "NF calibrated [ext] [chain 2] is %d\n", nf);
 			nfarray[5] = nf;
 		}
 	}
@@ -188,6 +189,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
 static void ath9k_hw_setup_calibration(struct ath_hw *ah,
 				       struct ath9k_cal_list *currCal)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
+
 	REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
 		      AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
 		      currCal->calData->calCountMax);
@@ -195,23 +198,23 @@ static void ath9k_hw_setup_calibration(struct ath_hw *ah,
 	switch (currCal->calData->calType) {
 	case IQ_MISMATCH_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"starting IQ Mismatch Calibration\n");
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "starting IQ Mismatch Calibration\n");
 		break;
 	case ADC_GAIN_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"starting ADC Gain Calibration\n");
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "starting ADC Gain Calibration\n");
 		break;
 	case ADC_DC_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"starting ADC DC Calibration\n");
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "starting ADC DC Calibration\n");
 		break;
 	case ADC_DC_INIT_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"starting Init ADC DC Calibration\n");
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "starting Init ADC DC Calibration\n");
 		break;
 	}
 
@@ -278,7 +281,7 @@ static bool ath9k_hw_per_calibration(struct ath_hw *ah,
 static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
 				     enum ath9k_cal_types calType)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 
 	switch (calType & ah->supp_cals) {
 	case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
@@ -304,11 +307,11 @@ static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
 			REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
 		ah->totalIqCorrMeas[i] +=
 			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
-			ah->cal_samples, i, ah->totalPowerMeasI[i],
-			ah->totalPowerMeasQ[i],
-			ah->totalIqCorrMeas[i]);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+			  "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+			  ah->cal_samples, i, ah->totalPowerMeasI[i],
+			  ah->totalPowerMeasQ[i],
+			  ah->totalIqCorrMeas[i]);
 	}
 }
 
@@ -326,14 +329,14 @@ static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
 		ah->totalAdcQEvenPhase[i] +=
 			REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
-			"oddq=0x%08x; evenq=0x%08x;\n",
-			ah->cal_samples, i,
-			ah->totalAdcIOddPhase[i],
-			ah->totalAdcIEvenPhase[i],
-			ah->totalAdcQOddPhase[i],
-			ah->totalAdcQEvenPhase[i]);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+			  "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+			  "oddq=0x%08x; evenq=0x%08x;\n",
+			  ah->cal_samples, i,
+			  ah->totalAdcIOddPhase[i],
+			  ah->totalAdcIEvenPhase[i],
+			  ah->totalAdcQOddPhase[i],
+			  ah->totalAdcQEvenPhase[i]);
 	}
 }
 
@@ -351,19 +354,20 @@ static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
 		ah->totalAdcDcOffsetQEvenPhase[i] +=
 			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
-			"oddq=0x%08x; evenq=0x%08x;\n",
-			ah->cal_samples, i,
-			ah->totalAdcDcOffsetIOddPhase[i],
-			ah->totalAdcDcOffsetIEvenPhase[i],
-			ah->totalAdcDcOffsetQOddPhase[i],
-			ah->totalAdcDcOffsetQEvenPhase[i]);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+			  "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+			  "oddq=0x%08x; evenq=0x%08x;\n",
+			  ah->cal_samples, i,
+			  ah->totalAdcDcOffsetIOddPhase[i],
+			  ah->totalAdcDcOffsetIEvenPhase[i],
+			  ah->totalAdcDcOffsetQOddPhase[i],
+			  ah->totalAdcDcOffsetQEvenPhase[i]);
 	}
 }
 
 static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 powerMeasQ, powerMeasI, iqCorrMeas;
 	u32 qCoffDenom, iCoffDenom;
 	int32_t qCoff, iCoff;
@@ -374,13 +378,13 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
 		powerMeasQ = ah->totalPowerMeasQ[i];
 		iqCorrMeas = ah->totalIqCorrMeas[i];
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Starting IQ Cal and Correction for Chain %d\n",
-			i);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Starting IQ Cal and Correction for Chain %d\n",
+			  i);
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Orignal: Chn %diq_corr_meas = 0x%08x\n",
-			i, ah->totalIqCorrMeas[i]);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+			  i, ah->totalIqCorrMeas[i]);
 
 		iqCorrNeg = 0;
 
@@ -389,27 +393,28 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
 			iqCorrNeg = 1;
 		}
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
-			iqCorrNeg);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+		ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+			  iqCorrNeg);
 
 		iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
 		qCoffDenom = powerMeasQ / 64;
 
-		if (powerMeasQ != 0) {
+		if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
+		    (qCoffDenom != 0)) {
 			iCoff = iqCorrMeas / iCoffDenom;
 			qCoff = powerMeasI / qCoffDenom - 64;
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"Chn %d iCoff = 0x%08x\n", i, iCoff);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"Chn %d qCoff = 0x%08x\n", i, qCoff);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "Chn %d iCoff = 0x%08x\n", i, iCoff);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "Chn %d qCoff = 0x%08x\n", i, qCoff);
 
 			iCoff = iCoff & 0x3f;
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
 			if (iqCorrNeg == 0x0)
 				iCoff = 0x40 - iCoff;
 
@@ -418,9 +423,9 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
 			else if (qCoff <= -16)
 				qCoff = 16;
 
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"Chn %d : iCoff = 0x%x  qCoff = 0x%x\n",
-				i, iCoff, qCoff);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "Chn %d : iCoff = 0x%x  qCoff = 0x%x\n",
+				  i, iCoff, qCoff);
 
 			REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
 				      AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
@@ -428,9 +433,9 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
 			REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
 				      AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
 				      qCoff);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"IQ Cal and Correction done for Chain %d\n",
-				i);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "IQ Cal and Correction done for Chain %d\n",
+				  i);
 		}
 	}
 
@@ -440,6 +445,7 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
 
 static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
 	u32 qGainMismatch, iGainMismatch, val, i;
 
@@ -449,21 +455,21 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
 		qOddMeasOffset = ah->totalAdcQOddPhase[i];
 		qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Starting ADC Gain Cal for Chain %d\n", i);
-
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_i = 0x%08x\n", i,
-			iOddMeasOffset);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_i = 0x%08x\n", i,
-			iEvenMeasOffset);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_q = 0x%08x\n", i,
-			qOddMeasOffset);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_q = 0x%08x\n", i,
-			qEvenMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Starting ADC Gain Cal for Chain %d\n", i);
+
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+			  iOddMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+			  iEvenMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+			  qOddMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+			  qEvenMeasOffset);
 
 		if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
 			iGainMismatch =
@@ -473,20 +479,20 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
 				((qOddMeasOffset * 32) /
 				 qEvenMeasOffset) & 0x3f;
 
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"Chn %d gain_mismatch_i = 0x%08x\n", i,
-				iGainMismatch);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"Chn %d gain_mismatch_q = 0x%08x\n", i,
-				qGainMismatch);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "Chn %d gain_mismatch_i = 0x%08x\n", i,
+				  iGainMismatch);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "Chn %d gain_mismatch_q = 0x%08x\n", i,
+				  qGainMismatch);
 
 			val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
 			val &= 0xfffff000;
 			val |= (qGainMismatch) | (iGainMismatch << 6);
 			REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
 
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"ADC Gain Cal done for Chain %d\n", i);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "ADC Gain Cal done for Chain %d\n", i);
 		}
 	}
 
@@ -497,6 +503,7 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
 
 static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 iOddMeasOffset, iEvenMeasOffset, val, i;
 	int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
 	const struct ath9k_percal_data *calData =
@@ -510,41 +517,41 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
 		qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
 		qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Starting ADC DC Offset Cal for Chain %d\n", i);
-
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_i = %d\n", i,
-			iOddMeasOffset);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_i = %d\n", i,
-			iEvenMeasOffset);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_q = %d\n", i,
-			qOddMeasOffset);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_q = %d\n", i,
-			qEvenMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_odd_i = %d\n", i,
+			  iOddMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_even_i = %d\n", i,
+			  iEvenMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_odd_q = %d\n", i,
+			  qOddMeasOffset);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d pwr_meas_even_q = %d\n", i,
+			  qEvenMeasOffset);
 
 		iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
 			       numSamples) & 0x1ff;
 		qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
 			       numSamples) & 0x1ff;
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
-			iDcMismatch);
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
-			qDcMismatch);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+			  iDcMismatch);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+			  qDcMismatch);
 
 		val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
 		val &= 0xc0000fff;
 		val |= (qDcMismatch << 12) | (iDcMismatch << 21);
 		REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
 
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"ADC DC Offset Cal done for Chain %d\n", i);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "ADC DC Offset Cal done for Chain %d\n", i);
 	}
 
 	REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
@@ -555,7 +562,8 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
 /* This is done for the currently configured channel */
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ieee80211_conf *conf = &common->hw->conf;
 	struct ath9k_cal_list *currCal = ah->cal_list_curr;
 
 	if (!ah->curchan)
@@ -568,24 +576,25 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
 		return true;
 
 	if (currCal->calState != CAL_DONE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"Calibration state incorrect, %d\n",
-			currCal->calState);
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "Calibration state incorrect, %d\n",
+			  currCal->calState);
 		return true;
 	}
 
 	if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType))
 		return true;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-		"Resetting Cal %d state for channel %u\n",
-		currCal->calData->calType, conf->channel->center_freq);
+	ath_print(common, ATH_DBG_CALIBRATE,
+		  "Resetting Cal %d state for channel %u\n",
+		  currCal->calData->calType, conf->channel->center_freq);
 
 	ah->curchan->CalValid &= ~currCal->calData->calType;
 	currCal->calState = CAL_WAITING;
 
 	return false;
 }
+EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
 
 void ath9k_hw_start_nfcal(struct ath_hw *ah)
 {
@@ -645,11 +654,11 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
 
-	for (j = 0; j < 1000; j++) {
+	for (j = 0; j < 5; j++) {
 		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
 		     AR_PHY_AGC_CONTROL_NF) == 0)
 			break;
-		udelay(10);
+		udelay(50);
 	}
 
 	for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -665,6 +674,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 int16_t ath9k_hw_getnf(struct ath_hw *ah,
 		       struct ath9k_channel *chan)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int16_t nf, nfThresh;
 	int16_t nfarray[NUM_NF_READINGS] = { 0 };
 	struct ath9k_nfcal_hist *h;
@@ -672,8 +682,8 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
 
 	chan->channelFlags &= (~CHANNEL_CW_INT);
 	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-			"NF did not complete in calibration window\n");
+		ath_print(common, ATH_DBG_CALIBRATE,
+			  "NF did not complete in calibration window\n");
 		nf = 0;
 		chan->rawNoiseFloor = nf;
 		return chan->rawNoiseFloor;
@@ -682,10 +692,10 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
 		nf = nfarray[0];
 		if (getNoiseFloorThresh(ah, c->band, &nfThresh)
 		    && nf > nfThresh) {
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"noise floor failed detected; "
-				"detected %d, threshold %d\n",
-				nf, nfThresh);
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "noise floor failure detected; "
+				  "measured %d, threshold %d\n",
+				  nf, nfThresh);
 			chan->channelFlags |= CHANNEL_CW_INT;
 		}
 	}
@@ -737,51 +747,73 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
 
 	return nf;
 }
+EXPORT_SYMBOL(ath9k_hw_getchan_noise);
 
-static void ath9k_olc_temp_compensation(struct ath_hw *ah)
+static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
 {
-	u32 rddata, i;
-	int delta, currPDADC, regval, slope;
+	u32 rddata;
+	int32_t delta, currPDADC, slope;
 
 	rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
 	currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
 
+	if (ah->initPDADC == 0 || currPDADC == 0) {
+		/*
+		 * A zero value indicates that no frames have been transmitted
+		 * yet; temperature compensation cannot be done until frames
+		 * are transmitted.
+		 */
+		return;
+	} else {
+		slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
+
+		if (slope == 0) { /* to avoid divide by zero case */
+			delta = 0;
+		} else {
+			delta = ((currPDADC - ah->initPDADC)*4) / slope;
+		}
+		REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+			      AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+		REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+			      AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+	}
+}
+
+static void ath9k_olc_temp_compensation(struct ath_hw *ah)
+{
+	u32 rddata, i;
+	int delta, currPDADC, regval;
 
 	if (OLC_FOR_AR9287_10_LATER) {
+		ath9k_olc_temp_compensation_9287(ah);
+	} else {
+		rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+		currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
 		if (ah->initPDADC == 0 || currPDADC == 0) {
 			return;
 		} else {
-			slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
-			if (slope == 0)
-				delta = 0;
+			if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
+				delta = (currPDADC - ah->initPDADC + 4) / 8;
 			else
-				delta = ((currPDADC - ah->initPDADC)*4) / slope;
-			REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
-					AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
-			REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
-					AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
-		}
-	} else {
-		if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
-			delta = (currPDADC - ah->initPDADC + 4) / 8;
-		else
-			delta = (currPDADC - ah->initPDADC + 5) / 10;
-
-		if (delta != ah->PDADCdelta) {
-			ah->PDADCdelta = delta;
-			for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
-				regval = ah->originalGain[i] - delta;
-				if (regval < 0)
-					regval = 0;
-
-				REG_RMW_FIELD(ah, AR_PHY_TX_GAIN_TBL1 + i * 4,
-						AR_PHY_TX_GAIN, regval);
+				delta = (currPDADC - ah->initPDADC + 5) / 10;
+
+			if (delta != ah->PDADCdelta) {
+				ah->PDADCdelta = delta;
+				for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
+					regval = ah->originalGain[i] - delta;
+					if (regval < 0)
+						regval = 0;
+
+					REG_RMW_FIELD(ah,
+						      AR_PHY_TX_GAIN_TBL1 + i * 4,
+						      AR_PHY_TX_GAIN, regval);
+				}
 			}
 		}
 	}
 }
 
-static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
+static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
 {
 	u32 regVal;
 	unsigned int i;
@@ -845,7 +877,7 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
 	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
 
 	/* find off_6_1; */
-	for (i = 6; i >= 0; i--) {
+	for (i = 6; i > 0; i--) {
 		regVal = REG_READ(ah, 0x7834);
 		regVal |= (1 << (20 + i));
 		REG_WRITE(ah, 0x7834, regVal);
@@ -857,10 +889,19 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
 		REG_WRITE(ah, 0x7834, regVal);
 	}
 
-	/*  Empirical offset correction  */
-#if 0
-	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0x20);
-#endif
+	regVal = (regVal >> 20) & 0x7f;
+
+	/* Update PA cal info */
+	if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+		if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+			ah->pacal_info.max_skipcount =
+				2 * ah->pacal_info.max_skipcount;
+		ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+	} else {
+		ah->pacal_info.max_skipcount = 1;
+		ah->pacal_info.skipcount = 0;
+		ah->pacal_info.prev_offset = regVal;
+	}
 
 	regVal = REG_READ(ah, 0x7834);
 	regVal |= 0x1;
@@ -875,7 +916,7 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
 
 static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
 {
-
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 regVal;
 	int i, offset, offs_6_1, offs_0;
 	u32 ccomp_org, reg_field;
@@ -889,7 +930,7 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
 		{ 0x7838, 0 },
 	};
 
-	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+	ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
 
 	/* PA CAL is not needed for high power solution */
 	if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -1011,7 +1052,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
 	if (longcal) {
 		/* Do periodic PAOffset Cal */
 		if (AR_SREV_9271(ah))
-			ath9k_hw_9271_pa_cal(ah);
+			ath9k_hw_9271_pa_cal(ah, false);
 		else if (AR_SREV_9285_11_OR_LATER(ah)) {
 			if (!ah->pacal_info.skipcount)
 				ath9k_hw_9285_pa_cal(ah, false);
@@ -1036,9 +1077,13 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
 
 	return iscaldone;
 }
+EXPORT_SYMBOL(ath9k_hw_calibrate);
 
+/* Carrier leakage Calibration fix */
 static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
+
 	REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
 	if (IS_CHAN_HT20(chan)) {
 		REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
@@ -1049,9 +1094,9 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
 		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
 		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
 				  AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "offset "
-				"calibration failed to complete in "
-				"1ms; noisy ??\n");
+			ath_print(common, ATH_DBG_CALIBRATE, "offset "
+				  "calibration failed to complete in "
+				  "1ms; noisy environment?\n");
 			return false;
 		}
 		REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
@@ -1064,8 +1109,8 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
 	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
 			  0, AH_WAIT_TIMEOUT)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "offset calibration "
-				"failed to complete in 1ms; noisy ??\n");
+		ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
+			  "failed to complete in 1ms; noisy environment?\n");
 		return false;
 	}
 
@@ -1078,7 +1123,9 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
 
 bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-	if (AR_SREV_9285_12_OR_LATER(ah)) {
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
 		if (!ar9285_clc(ah, chan))
 			return false;
 	} else {
@@ -1098,9 +1145,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 		/* Poll for offset calibration complete */
 		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
 				   0, AH_WAIT_TIMEOUT)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"offset calibration failed to complete in 1ms; "
-				"noisy environment?\n");
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "offset calibration failed to "
+				  "complete in 1ms; noisy environment?\n");
 			return false;
 		}
 
@@ -1114,7 +1161,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 	}
 
 	/* Do PA Calibration */
-	if (AR_SREV_9285_11_OR_LATER(ah))
+	if (AR_SREV_9271(ah))
+		ath9k_hw_9271_pa_cal(ah, true);
+	else if (AR_SREV_9285_11_OR_LATER(ah))
 		ath9k_hw_9285_pa_cal(ah, true);
 
 	/* Do NF Calibration after DC offset and other calibrations */
@@ -1128,20 +1177,20 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 		if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
 			INIT_CAL(&ah->adcgain_caldata);
 			INSERT_CAL(ah, &ah->adcgain_caldata);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"enabling ADC Gain Calibration.\n");
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "enabling ADC Gain Calibration.\n");
 		}
 		if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
 			INIT_CAL(&ah->adcdc_caldata);
 			INSERT_CAL(ah, &ah->adcdc_caldata);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"enabling ADC DC Calibration.\n");
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "enabling ADC DC Calibration.\n");
 		}
 		if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
 			INIT_CAL(&ah->iq_caldata);
 			INSERT_CAL(ah, &ah->iq_caldata);
-			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
-				"enabling IQ Calibration.\n");
+			ath_print(common, ATH_DBG_CALIBRATE,
+				  "enabling IQ Calibration.\n");
 		}
 
 		ah->cal_list_curr = ah->cal_list;
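
The open-loop power control (OLC) temperature compensation split out above boils down to a small piece of integer arithmetic: the averaged PD ADC reading is compared with the value latched at reset, and the difference is scaled either by the EEPROM temperature-sensor slope (the AR9287 path) or by a fixed divisor chosen by EEP_DAC_HPWR_5G (the older path). The standalone C sketch below mirrors only that arithmetic; the sample PDADC and slope values are invented for illustration and nothing is read from hardware.

#include <stdio.h>

/* Hedged sketch of the delta computation used by the OLC temperature
 * compensation paths above; currPDADC/initPDADC/slope are made up. */
static int olc_delta_9287(int currPDADC, int initPDADC, int slope)
{
	if (currPDADC == 0 || initPDADC == 0)
		return 0;		/* no frames transmitted yet */
	if (slope == 0)
		return 0;		/* avoid divide by zero */
	return ((currPDADC - initPDADC) * 4) / slope;
}

static int olc_delta_legacy(int currPDADC, int initPDADC, int dac_hpwr_5g)
{
	return dac_hpwr_5g ? (currPDADC - initPDADC + 4) / 8
			   : (currPDADC - initPDADC + 5) / 10;
}

int main(void)
{
	printf("AR9287 delta: %d\n", olc_delta_9287(140, 120, 10));
	printf("legacy delta: %d\n", olc_delta_legacy(140, 120, 1));
	return 0;
}

The zero checks mirror the guards added in ath9k_olc_temp_compensation_9287(): a zero PDADC means no frames have gone out yet, and a zero slope would otherwise divide by zero.
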
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 9028ab193e42..b2c873e97485 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -17,6 +17,8 @@
 #ifndef CALIB_H
 #define CALIB_H
 
+#include "hw.h"
+
 extern const struct ath9k_percal_data iq_cal_multi_sample;
 extern const struct ath9k_percal_data iq_cal_single_sample;
 extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
new file mode 100644
index 000000000000..4d775ae141db
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Module for common driver code between ath9k and ath9k_htc
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "common.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Common RX processing */
+
+/* Assumes you've already done the endian to CPU conversion */
+static bool ath9k_rx_accept(struct ath_common *common,
+			    struct sk_buff *skb,
+			    struct ieee80211_rx_status *rxs,
+			    struct ath_rx_status *rx_stats,
+			    bool *decrypt_error)
+{
+	struct ath_hw *ah = common->ah;
+	struct ieee80211_hdr *hdr;
+	__le16 fc;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = hdr->frame_control;
+
+	if (!rx_stats->rs_datalen)
+		return false;
+	/*
+	 * rs_status follows rs_datalen, so an rs_datalen larger than the
+	 * RX buffer is a hint that the hardware corrupted the descriptor;
+	 * ignore such frames.
+	 */
+	if (rx_stats->rs_datalen > common->rx_bufsize)
+		return false;
+
+	/*
+	 * rs_more indicates chained descriptors which can be used
+	 * to link buffers together for a sort of scatter-gather
+	 * operation.
+	 *
+	 * The rx_stats->rs_status will not be set until the end of the
+	 * chained descriptors so it can be ignored if rs_more is set. The
+	 * rs_more will be false at the last element of the chained
+	 * descriptors.
+	 */
+	if (!rx_stats->rs_more && rx_stats->rs_status != 0) {
+		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
+			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
+			return false;
+
+		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
+			*decrypt_error = true;
+		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
+			if (ieee80211_is_ctl(fc))
+				/*
+				 * Sometimes, we get invalid
+				 * MIC failures on valid control frames.
+				 * Remove these mic errors.
+				 */
+				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
+			else
+				rxs->flag |= RX_FLAG_MMIC_ERROR;
+		}
+		/*
+		 * Reject error frames with the exception of
+		 * decryption and MIC failures. For monitor mode,
+		 * we also ignore the CRC error.
+		 */
+		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+			if (rx_stats->rs_status &
+			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+			      ATH9K_RXERR_CRC))
+				return false;
+		} else {
+			if (rx_stats->rs_status &
+			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
+static u8 ath9k_process_rate(struct ath_common *common,
+			     struct ieee80211_hw *hw,
+			     struct ath_rx_status *rx_stats,
+			     struct ieee80211_rx_status *rxs,
+			     struct sk_buff *skb)
+{
+	struct ieee80211_supported_band *sband;
+	enum ieee80211_band band;
+	unsigned int i = 0;
+
+	band = hw->conf.channel->band;
+	sband = hw->wiphy->bands[band];
+
+	if (rx_stats->rs_rate & 0x80) {
+		/* HT rate */
+		rxs->flag |= RX_FLAG_HT;
+		if (rx_stats->rs_flags & ATH9K_RX_2040)
+			rxs->flag |= RX_FLAG_40MHZ;
+		if (rx_stats->rs_flags & ATH9K_RX_GI)
+			rxs->flag |= RX_FLAG_SHORT_GI;
+		return rx_stats->rs_rate & 0x7f;
+	}
+
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if (sband->bitrates[i].hw_value == rx_stats->rs_rate)
+			return i;
+		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+			rxs->flag |= RX_FLAG_SHORTPRE;
+			return i;
+		}
+	}
+
+	/* No valid hardware bitrate found -- we should not get here */
+	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
+		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
+	if ((common->debug_mask & ATH_DBG_XMIT))
+		print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
+
+	return 0;
+}
+
+static void ath9k_process_rssi(struct ath_common *common,
+			       struct ieee80211_hw *hw,
+			       struct sk_buff *skb,
+			       struct ath_rx_status *rx_stats)
+{
+	struct ath_hw *ah = common->ah;
+	struct ieee80211_sta *sta;
+	struct ieee80211_hdr *hdr;
+	struct ath_node *an;
+	int last_rssi = ATH_RSSI_DUMMY_MARKER;
+	__le16 fc;
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+
+	rcu_read_lock();
+	/*
+	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
+	 * under the current ath9k virtual wiphy implementation as we have
+	 * no way of tying a vif to wiphy. Typically vifs are attached to
+	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
+	 * wiphy you'd have to iterate over every wiphy and each sdata.
+	 */
+	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
+	if (sta) {
+		an = (struct ath_node *) sta->drv_priv;
+		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
+		   !rx_stats->rs_moreaggr)
+			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
+		last_rssi = an->last_rssi;
+	}
+	rcu_read_unlock();
+
+	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
+					      ATH_RSSI_EP_MULTIPLIER);
+	if (rx_stats->rs_rssi < 0)
+		rx_stats->rs_rssi = 0;
+
+	/* Update Beacon RSSI; this is used by ANI. */
+	if (ieee80211_is_beacon(fc))
+		ah->stats.avgbrssi = rx_stats->rs_rssi;
+}
+
+/*
+ * For Decrypt or Demic errors, we only mark the packet status here and always
+ * push the frame up to let mac80211 handle the actual error case, be it a
+ * missing decryption key or a real decryption error. This lets us keep the
+ * statistics there.
+ */
+int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
+				struct ieee80211_hw *hw,
+				struct sk_buff *skb,
+				struct ath_rx_status *rx_stats,
+				struct ieee80211_rx_status *rx_status,
+				bool *decrypt_error)
+{
+	struct ath_hw *ah = common->ah;
+
+	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+	if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
+		return -EINVAL;
+
+	ath9k_process_rssi(common, hw, skb, rx_stats);
+
+	rx_status->rate_idx = ath9k_process_rate(common, hw,
+						 rx_stats, rx_status, skb);
+	rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
+	rx_status->band = hw->conf.channel->band;
+	rx_status->freq = hw->conf.channel->center_freq;
+	rx_status->noise = common->ani.noise_floor;
+	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
+	rx_status->antenna = rx_stats->rs_antenna;
+	rx_status->flag |= RX_FLAG_TSFT;
+
+	return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+				  struct sk_buff *skb,
+				  struct ath_rx_status *rx_stats,
+				  struct ieee80211_rx_status *rxs,
+				  bool decrypt_error)
+{
+	struct ath_hw *ah = common->ah;
+	struct ieee80211_hdr *hdr;
+	int hdrlen, padpos, padsize;
+	u8 keyix;
+	__le16 fc;
+
+	/* see if any padding is done by the hw and remove it */
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	fc = hdr->frame_control;
+	padpos = ath9k_cmn_padpos(hdr->frame_control);
+
+	/* The MAC header is padded to a 32-bit boundary if the
+	 * packet payload is non-zero. The general calculation for
+	 * padsize would take into account odd header lengths:
+	 * padsize = (4 - padpos % 4) % 4; However, since only
+	 * even-length headers are used, padding can only be 0 or 2
+	 * bytes and we can optimize this a bit. In addition, we must
+	 * not try to remove padding from short control frames that do
+	 * not have payload. */
+	padsize = padpos & 3;
+	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+		memmove(skb->data + padsize, skb->data, padpos);
+		skb_pull(skb, padsize);
+	}
+
+	keyix = rx_stats->rs_keyix;
+
+	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error) {
+		rxs->flag |= RX_FLAG_DECRYPTED;
+	} else if (ieee80211_has_protected(fc)
+		   && !decrypt_error && skb->len >= hdrlen + 4) {
+		keyix = skb->data[hdrlen + 3] >> 6;
+
+		if (test_bit(keyix, common->keymap))
+			rxs->flag |= RX_FLAG_DECRYPTED;
+	}
+	if (ah->sw_mgmt_crypto &&
+	    (rxs->flag & RX_FLAG_DECRYPTED) &&
+	    ieee80211_is_mgmt(fc))
+		/* Use software decrypt for management frames. */
+		rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
+
+int ath9k_cmn_padpos(__le16 frame_control)
+{
+	int padpos = 24;
+	if (ieee80211_has_a4(frame_control)) {
+		padpos += ETH_ALEN;
+	}
+	if (ieee80211_is_data_qos(frame_control)) {
+		padpos += IEEE80211_QOS_CTL_LEN;
+	}
+
+	return padpos;
+}
+EXPORT_SYMBOL(ath9k_cmn_padpos);
+
+static int __init ath9k_cmn_init(void)
+{
+	return 0;
+}
+module_init(ath9k_cmn_init);
+
+static void __exit ath9k_cmn_exit(void)
+{
+	return;
+}
+module_exit(ath9k_cmn_exit);
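
ath9k_cmn_rx_skb_postprocess() above strips the 32-bit alignment padding that the MAC inserts after the 802.11 header, using ath9k_cmn_padpos() to find where the padding starts. The arithmetic is easy to check on its own; the sketch below is a plain user-space rendering of it (header lengths only, no skb handling), with a three-address QoS data frame as the worked case. The flag arguments stand in for the ieee80211_has_a4()/ieee80211_is_data_qos() tests.

#include <stdio.h>

/* Mirrors the padpos/padsize math from common.c above. */
static int padpos(int has_a4, int is_qos_data)
{
	int pos = 24;			/* base 802.11 header */

	if (has_a4)
		pos += 6;		/* ETH_ALEN: fourth address */
	if (is_qos_data)
		pos += 2;		/* QoS control field */
	return pos;
}

int main(void)
{
	int pos = padpos(0, 1);		/* QoS data, three addresses */
	int padsize = pos & 3;		/* headers are even, so 0 or 2 */

	/* 26-byte header -> 2 bytes of hardware padding to remove */
	printf("padpos=%d padsize=%d\n", pos, padsize);
	return 0;
}
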
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
new file mode 100644
index 000000000000..042999c2fe9c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <net/mac80211.h>
+
+#include "../ath.h"
+#include "../debug.h"
+
+#include "hw.h"
+
+/* Common header for Atheros 802.11n base driver cores */
+
+#define WME_NUM_TID             16
+#define WME_BA_BMP_SIZE         64
+#define WME_MAX_BA              WME_BA_BMP_SIZE
+#define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
+
+#define WME_AC_BE   0
+#define WME_AC_BK   1
+#define WME_AC_VI   2
+#define WME_AC_VO   3
+#define WME_NUM_AC  4
+
+#define ATH_RSSI_DUMMY_MARKER   0x127
+#define ATH_RSSI_LPF_LEN 		10
+#define RSSI_LPF_THRESHOLD		-20
+#define ATH_RSSI_EP_MULTIPLIER     (1<<7)
+#define ATH_EP_MUL(x, mul)         ((x) * (mul))
+#define ATH_RSSI_IN(x)             (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
+#define ATH_LPF_RSSI(x, y, len) \
+    ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
+#define ATH_RSSI_LPF(x, y) do {                     			\
+    if ((y) >= RSSI_LPF_THRESHOLD)                         		\
+	x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN);  	\
+} while (0)
+#define ATH_EP_RND(x, mul) 						\
+	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
+
+struct ath_atx_ac {
+	int sched;
+	int qnum;
+	struct list_head list;
+	struct list_head tid_q;
+};
+
+struct ath_buf_state {
+	int bfs_nframes;
+	u16 bfs_al;
+	u16 bfs_frmlen;
+	int bfs_seqno;
+	int bfs_tidno;
+	int bfs_retries;
+	u8 bf_type;
+	u32 bfs_keyix;
+	enum ath9k_key_type bfs_keytype;
+};
+
+struct ath_buf {
+	struct list_head list;
+	struct ath_buf *bf_lastbf;	/* last buf of this unit (a frame or
+					   an aggregate) */
+	struct ath_buf *bf_next;	/* next subframe in the aggregate */
+	struct sk_buff *bf_mpdu;	/* enclosing frame structure */
+	struct ath_desc *bf_desc;	/* virtual addr of desc */
+	dma_addr_t bf_daddr;		/* physical addr of desc */
+	dma_addr_t bf_buf_addr;		/* physical addr of data buffer */
+	bool bf_stale;
+	bool bf_isnullfunc;
+	u16 bf_flags;
+	struct ath_buf_state bf_state;
+	dma_addr_t bf_dmacontext;
+	struct ath_wiphy *aphy;
+};
+
+struct ath_atx_tid {
+	struct list_head list;
+	struct list_head buf_q;
+	struct ath_node *an;
+	struct ath_atx_ac *ac;
+	struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
+	u16 seq_start;
+	u16 seq_next;
+	u16 baw_size;
+	int tidno;
+	int baw_head;   /* first un-acked tx buffer */
+	int baw_tail;   /* next unused tx buffer slot */
+	int sched;
+	int paused;
+	u8 state;
+};
+
+struct ath_node {
+	struct ath_common *common;
+	struct ath_atx_tid tid[WME_NUM_TID];
+	struct ath_atx_ac ac[WME_NUM_AC];
+	u16 maxampdu;
+	u8 mpdudensity;
+	int last_rssi;
+};
+
+int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
+				struct ieee80211_hw *hw,
+				struct sk_buff *skb,
+				struct ath_rx_status *rx_stats,
+				struct ieee80211_rx_status *rx_status,
+				bool *decrypt_error);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+				  struct sk_buff *skb,
+				  struct ath_rx_status *rx_stats,
+				  struct ieee80211_rx_status *rxs,
+				  bool decrypt_error);
+
+int ath9k_cmn_padpos(__le16 frame_control);
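
The RSSI macros declared in common.h keep the running average in an "enhanced precision" scale (multiplied by ATH_RSSI_EP_MULTIPLIER) and only round back to dB when the value is consumed, as ath9k_process_rssi() does above. The following user-space sketch replays the same filter on a few invented samples; the constants are copied from the header, everything else is illustrative.

#include <stdio.h>

#define RSSI_DUMMY	0x127		/* ATH_RSSI_DUMMY_MARKER */
#define EP_MUL		128		/* ATH_RSSI_EP_MULTIPLIER (1 << 7) */
#define LPF_LEN		10		/* ATH_RSSI_LPF_LEN */
#define LPF_THRESH	(-20)		/* RSSI_LPF_THRESHOLD */

/* Same filter as ATH_RSSI_LPF()/ATH_LPF_RSSI(): keep the average scaled
 * by EP_MUL and mix each new sample in with weight 1/LPF_LEN. */
static int rssi_lpf(int avg, int sample)
{
	if (sample < LPF_THRESH)
		return avg;			/* too weak, ignore */
	if (avg == RSSI_DUMMY)
		return sample * EP_MUL;		/* first sample */
	return (avg * (LPF_LEN - 1) + sample * EP_MUL) / LPF_LEN;
}

/* ATH_EP_RND(): round the scaled average back to plain dB. */
static int ep_rnd(int x)
{
	return (x % EP_MUL >= EP_MUL / 2) ? (x + EP_MUL - 1) / EP_MUL
					  : x / EP_MUL;
}

int main(void)
{
	int samples[] = { 30, 28, 35, 31 };	/* invented readings */
	int avg = RSSI_DUMMY;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		avg = rssi_lpf(avg, samples[i]);
	printf("filtered rssi = %d dB\n", ep_rnd(avg));
	return 0;
}
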
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2be4c2252047..b66f72dbf7b9 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -18,40 +18,30 @@
 
 #include "ath9k.h"
 
-static unsigned int ath9k_debug = DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
+#define REG_WRITE_D(_ah, _reg, _val) \
+	ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
+#define REG_READ_D(_ah, _reg) \
+	ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
 
 static struct dentry *ath9k_debugfs_root;
 
-void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...)
-{
-	if (!sc)
-		return;
-
-	if (sc->debug.debug_mask & dbg_mask) {
-		va_list args;
-
-		va_start(args, fmt);
-		printk(KERN_DEBUG "ath9k: ");
-		vprintk(fmt, args);
-		va_end(args);
-	}
-}
-
 static int ath9k_debugfs_open(struct inode *inode, struct file *file)
 {
 	file->private_data = inode->i_private;
 	return 0;
 }
 
+#ifdef CONFIG_ATH_DEBUG
+
 static ssize_t read_file_debug(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
 {
 	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	char buf[32];
 	unsigned int len;
 
-	len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.debug_mask);
+	len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -59,6 +49,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
 			     size_t count, loff_t *ppos)
 {
 	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	unsigned long mask;
 	char buf[32];
 	ssize_t len;
@@ -71,7 +62,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
 	if (strict_strtoul(buf, 0, &mask))
 		return -EINVAL;
 
-	sc->debug.debug_mask = mask;
+	common->debug_mask = mask;
 	return count;
 }
 
@@ -82,6 +73,8 @@ static const struct file_operations fops_debug = {
 	.owner = THIS_MODULE
 };
 
+#endif
+
 static ssize_t read_file_dma(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
 {
@@ -95,7 +88,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
 
 	ath9k_ps_wakeup(sc);
 
-	REG_WRITE(ah, AR_MACMISC,
+	REG_WRITE_D(ah, AR_MACMISC,
 		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
 		   (AR_MACMISC_MISC_OBS_BUS_1 <<
 		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));
@@ -107,7 +100,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
 		if (i % 4 == 0)
 			len += snprintf(buf + len, sizeof(buf) - len, "\n");
 
-		val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
+		val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
 		len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
 				i, val[i]);
 	}
@@ -157,9 +150,9 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
 		(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
 
 	len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
-			REG_READ(ah, AR_OBS_BUS_1));
+			REG_READ_D(ah, AR_OBS_BUS_1));
 	len += snprintf(buf + len, sizeof(buf) - len,
-			"AR_CR: 0x%x \n", REG_READ(ah, AR_CR));
+			"AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
 
 	ath9k_ps_restore(sc);
 
@@ -266,18 +259,11 @@ static const struct file_operations fops_interrupt = {
 	.owner = THIS_MODULE
 };
 
-void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
+void ath_debug_stat_rc(struct ath_softc *sc, int final_rate)
 {
-	struct ath_tx_info_priv *tx_info_priv = NULL;
-	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_tx_rate *rates = tx_info->status.rates;
-	int final_ts_idx, idx;
 	struct ath_rc_stats *stats;
 
-	tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
-	final_ts_idx = tx_info_priv->tx.ts_rateindex;
-	idx = rates[final_ts_idx].idx;
-	stats = &sc->debug.stats.rcstats[idx];
+	stats = &sc->debug.stats.rcstats[final_rate];
 	stats->success++;
 }
 
@@ -376,12 +362,12 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
 				aphy->chan_idx, aphy->chan_is_ht);
 	}
 
-	put_unaligned_le32(REG_READ(sc->sc_ah, AR_STA_ID0), addr);
-	put_unaligned_le16(REG_READ(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
+	put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
+	put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"addr: %pM\n", addr);
-	put_unaligned_le32(REG_READ(sc->sc_ah, AR_BSSMSKL), addr);
-	put_unaligned_le16(REG_READ(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
+	put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr);
+	put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"addrmask: %pM\n", addr);
 
@@ -568,9 +554,10 @@ static const struct file_operations fops_xmit = {
 	.owner = THIS_MODULE
 };
 
-int ath9k_init_debug(struct ath_softc *sc)
+int ath9k_init_debug(struct ath_hw *ah)
 {
-	sc->debug.debug_mask = ath9k_debug;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
 
 	if (!ath9k_debugfs_root)
 		return -ENOENT;
@@ -580,10 +567,12 @@ int ath9k_init_debug(struct ath_softc *sc)
 	if (!sc->debug.debugfs_phy)
 		goto err;
 
+#ifdef CONFIG_ATH_DEBUG
 	sc->debug.debugfs_debug = debugfs_create_file("debug",
 		S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug);
 	if (!sc->debug.debugfs_debug)
 		goto err;
+#endif
 
 	sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUSR,
 				       sc->debug.debugfs_phy, sc, &fops_dma);
@@ -619,12 +608,15 @@ int ath9k_init_debug(struct ath_softc *sc)
 
 	return 0;
 err:
-	ath9k_exit_debug(sc);
+	ath9k_exit_debug(ah);
 	return -ENOMEM;
 }
 
-void ath9k_exit_debug(struct ath_softc *sc)
+void ath9k_exit_debug(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+
 	debugfs_remove(sc->debug.debugfs_xmit);
 	debugfs_remove(sc->debug.debugfs_wiphy);
 	debugfs_remove(sc->debug.debugfs_rcstat);
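
With the driver-private DPRINTF() removed, logging is gated by the mask stored in the shared struct ath_common and emitted through ath_print(); the deleted DPRINTF() body above shows exactly that pattern, and the debugfs "debug" file now simply rewrites common->debug_mask. The sketch below imitates the gating in user space with a stand-in structure and mask bits so the effect of flipping a category bit is visible; none of it is the real ath API.

#include <stdarg.h>
#include <stdio.h>

/* Stand-ins: the real masks and struct live in the shared ath module. */
#define DBG_FATAL	0x00000400
#define DBG_CALIBRATE	0x00000008

struct fake_common {
	unsigned int debug_mask;
};

/* Same shape as the removed DPRINTF() body: print only when the
 * category bit is set in the per-device mask. */
static void fake_ath_print(struct fake_common *common, int dbg_mask,
			   const char *fmt, ...)
{
	va_list args;

	if (!(common->debug_mask & dbg_mask))
		return;
	va_start(args, fmt);
	printf("ath9k: ");
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	struct fake_common common = { .debug_mask = DBG_FATAL };

	fake_ath_print(&common, DBG_CALIBRATE, "suppressed\n");
	common.debug_mask |= DBG_CALIBRATE;	/* e.g. echo 0x408 > debug */
	fake_ath_print(&common, DBG_CALIBRATE, "now visible\n");
	return 0;
}
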
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 7241f4748338..536663e3ee11 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -17,36 +17,19 @@
 #ifndef DEBUG_H
 #define DEBUG_H
 
-enum ATH_DEBUG {
-	ATH_DBG_RESET		= 0x00000001,
-	ATH_DBG_QUEUE		= 0x00000002,
-	ATH_DBG_EEPROM		= 0x00000004,
-	ATH_DBG_CALIBRATE	= 0x00000008,
-	ATH_DBG_INTERRUPT	= 0x00000010,
-	ATH_DBG_REGULATORY	= 0x00000020,
-	ATH_DBG_ANI		= 0x00000040,
-	ATH_DBG_XMIT		= 0x00000080,
-	ATH_DBG_BEACON		= 0x00000100,
-	ATH_DBG_CONFIG		= 0x00000200,
-	ATH_DBG_FATAL		= 0x00000400,
-	ATH_DBG_PS		= 0x00000800,
-	ATH_DBG_HWTIMER		= 0x00001000,
-	ATH_DBG_BTCOEX		= 0x00002000,
-	ATH_DBG_ANY		= 0xffffffff
-};
-
-#define DBG_DEFAULT (ATH_DBG_FATAL)
+#include "hw.h"
+#include "rc.h"
 
 struct ath_txq;
 struct ath_buf;
 
-#ifdef CONFIG_ATH9K_DEBUG
+#ifdef CONFIG_ATH9K_DEBUGFS
 #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
 #else
 #define TX_STAT_INC(q, c) do { } while (0)
 #endif
 
-#ifdef CONFIG_ATH9K_DEBUG
+#ifdef CONFIG_ATH9K_DEBUGFS
 
 /**
  * struct ath_interrupt_stats - Contains statistics about interrupts
@@ -140,7 +123,6 @@ struct ath_stats {
 };
 
 struct ath9k_debug {
-	int debug_mask;
 	struct dentry *debugfs_phy;
 	struct dentry *debugfs_debug;
 	struct dentry *debugfs_dma;
@@ -151,13 +133,13 @@ struct ath9k_debug {
 	struct ath_stats stats;
 };
 
-void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...);
-int ath9k_init_debug(struct ath_softc *sc);
-void ath9k_exit_debug(struct ath_softc *sc);
+int ath9k_init_debug(struct ath_hw *ah);
+void ath9k_exit_debug(struct ath_hw *ah);
+
 int ath9k_debug_create_root(void);
 void ath9k_debug_remove_root(void);
 void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
-void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb);
+void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
 void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
 		       struct ath_buf *bf);
 void ath_debug_stat_retries(struct ath_softc *sc, int rix,
@@ -165,17 +147,12 @@ void ath_debug_stat_retries(struct ath_softc *sc, int rix,
 
 #else
 
-static inline void DPRINTF(struct ath_softc *sc, int dbg_mask,
-			   const char *fmt, ...)
-{
-}
-
-static inline int ath9k_init_debug(struct ath_softc *sc)
+static inline int ath9k_init_debug(struct ath_hw *ah)
 {
 	return 0;
 }
 
-static inline void ath9k_exit_debug(struct ath_softc *sc)
+static inline void ath9k_exit_debug(struct ath_hw *ah)
 {
 }
 
@@ -194,7 +171,7 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
 }
 
 static inline void ath_debug_stat_rc(struct ath_softc *sc,
-				     struct sk_buff *skb)
+				     int final_rate)
 {
 }
 
@@ -209,6 +186,6 @@ static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
 {
 }
 
-#endif /* CONFIG_ATH9K_DEBUG */
+#endif /* CONFIG_ATH9K_DEBUGFS */
 
 #endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index b6e52d0f8c48..dacaae934148 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
 {
@@ -83,11 +83,9 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
 	return false;
 }
 
-bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
+bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
 {
-	struct ath_softc *sc = ah->ah_sc;
-
-	return sc->bus_ops->eeprom_read(ah, off, data);
+	return common->bus_ops->eeprom_read(common, off, data);
 }
 
 void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
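
ath9k_hw_nvram_read() now reaches the EEPROM through common->bus_ops->eeprom_read(common, off, data) instead of going back through struct ath_softc, so the same EEPROM parsing code can sit behind different bus glue. The sketch below imitates that indirection with invented types and a fake in-memory EEPROM; only the call shape is taken from the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative shapes only; names and layout are not the real ath structs. */
struct demo_common;

struct demo_bus_ops {
	bool (*eeprom_read)(struct demo_common *common, uint32_t off,
			    uint16_t *data);
};

struct demo_common {
	const struct demo_bus_ops *bus_ops;
	const uint16_t *eeprom;		/* pretend backing store */
	uint32_t eeprom_words;
};

static bool demo_eeprom_read(struct demo_common *common, uint32_t off,
			     uint16_t *data)
{
	if (off >= common->eeprom_words)
		return false;
	*data = common->eeprom[off];
	return true;
}

static const struct demo_bus_ops demo_ops = {
	.eeprom_read = demo_eeprom_read,
};

int main(void)
{
	const uint16_t blob[] = { 0xa55a, 0x1234 };	/* fake EEPROM words */
	struct demo_common common = {
		.bus_ops = &demo_ops,
		.eeprom = blob,
		.eeprom_words = 2,
	};
	uint16_t word;

	/* Same call shape as ath9k_hw_nvram_read(common, off, data). */
	if (common.bus_ops->eeprom_read(&common, 0, &word))
		printf("word 0 = 0x%04x\n", word);
	return 0;
}
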
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 4fe33f7eee9d..2f2993b50e2f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -17,6 +17,7 @@
 #ifndef EEPROM_H
 #define EEPROM_H
 
+#include "../ath.h"
 #include <net/cfg80211.h>
 
 #define AH_USE_EEPROM   0x1
@@ -133,6 +134,7 @@
 #define AR5416_EEP_MINOR_VER_17      0x11
 #define AR5416_EEP_MINOR_VER_19      0x13
 #define AR5416_EEP_MINOR_VER_20      0x14
+#define AR5416_EEP_MINOR_VER_21      0x15
 #define AR5416_EEP_MINOR_VER_22      0x16
 
 #define AR5416_NUM_5G_CAL_PIERS         8
@@ -153,7 +155,7 @@
 #define AR5416_BCHAN_UNUSED             0xFF
 #define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
 #define AR5416_MAX_CHAINS               3
-#define AR5416_PWR_TABLE_OFFSET         -5
+#define AR5416_PWR_TABLE_OFFSET_DB     -5
 
 /* Rx gain type values */
 #define AR5416_EEP_RXGAIN_23DB_BACKOFF     0
@@ -301,7 +303,7 @@ struct base_eep_header {
 	u8 txGainType;
 	u8 rcChainMask;
 	u8 desiredScaleCCK;
-	u8 power_table_offset;
+	u8 pwr_table_offset;
 	u8 frac_n_5g;
 	u8 futureBase_3[21];
 } __packed;
@@ -638,6 +640,7 @@ struct ar9287_eeprom {
 } __packed;
 
 enum reg_ext_bitmap {
+	REG_EXT_FCC_MIDBAND = 0,
 	REG_EXT_JAPAN_MIDBAND = 1,
 	REG_EXT_FCC_DFS_HT40 = 2,
 	REG_EXT_JAPAN_NONDFS_HT40 = 3,
@@ -684,7 +687,7 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
 			     int16_t targetRight);
 bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
 				    u16 *indexL, u16 *indexR);
-bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data);
+bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
 void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
 			     u8 *pVpdList, u16 numIntercepts,
 			     u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b8eca7be5f3a..68db16690abf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
 {
@@ -29,20 +29,21 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
 static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
 {
 #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+	struct ath_common *common = ath9k_hw_common(ah);
 	u16 *eep_data = (u16 *)&ah->eeprom.map4k;
 	int addr, eep_start_loc = 0;
 
 	eep_start_loc = 64;
 
 	if (!ath9k_hw_use_flash(ah)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"Reading from EEPROM, not flash\n");
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Reading from EEPROM, not flash\n");
 	}
 
 	for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
-		if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			       "Unable to read eeprom region \n");
+		if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
+			ath_print(common, ATH_DBG_EEPROM,
+				  "Unable to read eeprom region \n");
 			return false;
 		}
 		eep_data++;
@@ -55,6 +56,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
 static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 {
 #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ar5416_eeprom_4k *eep =
 		(struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
 	u16 *eepdata, temp, magic, magic2;
@@ -64,15 +66,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 
 
 	if (!ath9k_hw_use_flash(ah)) {
-		if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
+		if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
 					 &magic)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Reading Magic # failed\n");
+			ath_print(common, ATH_DBG_FATAL,
+				  "Reading Magic # failed\n");
 			return false;
 		}
 
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"Read Magic = 0x%04X\n", magic);
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Read Magic = 0x%04X\n", magic);
 
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
@@ -87,16 +89,16 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 					eepdata++;
 				}
 			} else {
-				DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-					"Invalid EEPROM Magic. "
-					"endianness mismatch.\n");
+				ath_print(common, ATH_DBG_FATAL,
+					  "Invalid EEPROM Magic. "
+					  "endianness mismatch.\n");
 				return -EINVAL;
 			}
 		}
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
-		need_swap ? "True" : "False");
+	ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+		  need_swap ? "True" : "False");
 
 	if (need_swap)
 		el = swab16(ah->eeprom.map4k.baseEepHeader.length);
@@ -117,8 +119,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 		u32 integer;
 		u16 word;
 
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"EEPROM Endianness is not native.. Changing\n");
+		ath_print(common, ATH_DBG_EEPROM,
+			  "EEPROM Endianness is not native.. Changing\n");
 
 		word = swab16(eep->baseEepHeader.length);
 		eep->baseEepHeader.length = word;
@@ -160,9 +162,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 
 	if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
 	    ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Bad EEPROM checksum 0x%x or revision 0x%04x\n",
-			sum, ah->eep_ops->get_eeprom_ver(ah));
+		ath_print(common, ATH_DBG_FATAL,
+			  "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+			  sum, ah->eep_ops->get_eeprom_ver(ah));
 		return -EINVAL;
 	}
 
@@ -208,6 +210,8 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
 		return pBase->rxMask;
 	case EEP_FRAC_N_5G:
 		return 0;
+	case EEP_PWR_TABLE_OFFSET:
+		return AR5416_PWR_TABLE_OFFSET_DB;
 	default:
 		return 0;
 	}
@@ -385,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
 				  struct ath9k_channel *chan,
 				  int16_t *pTxPowerIndexOffset)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
 	struct cal_data_per_freq_4k *pRawDataset;
 	u8 *pCalBChans = NULL;
@@ -470,21 +475,21 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
 					((pdadcValues[4 * j + 3] & 0xFF) << 24);
 				REG_WRITE(ah, regOffset, reg32);
 
-				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-					"PDADC (%d,%4x): %4.4x %8.8x\n",
-					i, regChainOffset, regOffset,
-					reg32);
-				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-					"PDADC: Chain %d | "
-					"PDADC %3d Value %3d | "
-					"PDADC %3d Value %3d | "
-					"PDADC %3d Value %3d | "
-					"PDADC %3d Value %3d |\n",
-					i, 4 * j, pdadcValues[4 * j],
-					4 * j + 1, pdadcValues[4 * j + 1],
-					4 * j + 2, pdadcValues[4 * j + 2],
-					4 * j + 3,
-					pdadcValues[4 * j + 3]);
+				ath_print(common, ATH_DBG_EEPROM,
+					  "PDADC (%d,%4x): %4.4x %8.8x\n",
+					  i, regChainOffset, regOffset,
+					  reg32);
+				ath_print(common, ATH_DBG_EEPROM,
+					  "PDADC: Chain %d | "
+					  "PDADC %3d Value %3d | "
+					  "PDADC %3d Value %3d | "
+					  "PDADC %3d Value %3d | "
+					  "PDADC %3d Value %3d |\n",
+					  i, 4 * j, pdadcValues[4 * j],
+					  4 * j + 1, pdadcValues[4 * j + 1],
+					  4 * j + 2, pdadcValues[4 * j + 2],
+					  4 * j + 3,
+					  pdadcValues[4 * j + 3]);
 
 				regOffset += 4;
 			}
@@ -750,7 +755,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
 
 	if (AR_SREV_9280_10_OR_LATER(ah)) {
 		for (i = 0; i < Ar5416RateSize; i++)
-			ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
+			ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
 	}
 
 	/* OFDM power per rate */
@@ -1107,6 +1112,10 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
 
 	REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
 		      pModal->txEndToRxOn);
+
+	if (AR_SREV_9271_10(ah))
+		REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
+			      pModal->txEndToRxOn);
 	REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
 		      pModal->thresh62);
 	REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
@@ -1148,20 +1157,21 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
 {
 #define EEP_MAP4K_SPURCHAN \
 	(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	u16 spur_val = AR_NO_SPUR;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"Getting spur idx %d is2Ghz. %d val %x\n",
-		i, is2GHz, ah->config.spurchans[i][is2GHz]);
+	ath_print(common, ATH_DBG_ANI,
+		  "Getting spur idx %d is2Ghz. %d val %x\n",
+		  i, is2GHz, ah->config.spurchans[i][is2GHz]);
 
 	switch (ah->config.spurmode) {
 	case SPUR_DISABLE:
 		break;
 	case SPUR_ENABLE_IOCTL:
 		spur_val = ah->config.spurchans[i][is2GHz];
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"Getting spur val from new loc. %d\n", spur_val);
+		ath_print(common, ATH_DBG_ANI,
+			  "Getting spur val from new loc. %d\n", spur_val);
 		break;
 	case SPUR_ENABLE_EEPROM:
 		spur_val = EEP_MAP4K_SPURCHAN;
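
The check_eeprom() paths above decide whether the EEPROM image needs byte-swapping by comparing the magic word against AR5416_EEPROM_MAGIC and against its swab16() counterpart before walking the image and summing the checksum. The decision step in isolation looks like the sketch below; the magic value used here is only a stand-in for the real constant, and the swap helper mirrors swab16().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for AR5416_EEPROM_MAGIC. */
#define DEMO_EEPROM_MAGIC	0xa55a

static uint16_t swab16_demo(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Returns 0 for a native image, 1 if every word must be swapped, and -1
 * when the magic matches in neither byte order (the "Invalid EEPROM
 * Magic" error path above). */
static int eeprom_needs_swap(uint16_t magic)
{
	if (magic == DEMO_EEPROM_MAGIC)
		return 0;
	if (swab16_demo(magic) == DEMO_EEPROM_MAGIC)
		return 1;
	return -1;
}

int main(void)
{
	printf("native:  %d\n", eeprom_needs_swap(DEMO_EEPROM_MAGIC));
	printf("swapped: %d\n", eeprom_needs_swap(swab16_demo(DEMO_EEPROM_MAGIC)));
	printf("garbage: %d\n", eeprom_needs_swap(0x1234));
	return 0;
}
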
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index c20c21a79b21..839d05a1df29 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
 {
@@ -29,20 +29,22 @@ static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah)
 static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
 {
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+	struct ath_common *common = ath9k_hw_common(ah);
 	u16 *eep_data;
 	int addr, eep_start_loc = AR9287_EEP_START_LOC;
 	eep_data = (u16 *)eep;
 
 	if (!ath9k_hw_use_flash(ah)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"Reading from EEPROM, not flash\n");
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Reading from EEPROM, not flash\n");
 	}
 
 	for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
 			addr++)	{
-		if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-				"Unable to read eeprom region \n");
+		if (!ath9k_hw_nvram_read(common,
+					 addr + eep_start_loc, eep_data)) {
+			ath_print(common, ATH_DBG_EEPROM,
+				  "Unable to read eeprom region \n");
 			return false;
 		}
 		eep_data++;
@@ -57,17 +59,18 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
 	int i, addr;
 	bool need_swap = false;
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!ath9k_hw_use_flash(ah)) {
-		if (!ath9k_hw_nvram_read
-		    (ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Reading Magic # failed\n");
+		if (!ath9k_hw_nvram_read(common,
+					 AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+			ath_print(common, ATH_DBG_FATAL,
+				  "Reading Magic # failed\n");
 			return false;
 		}
 
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-				"Read Magic = 0x%04X\n", magic);
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Read Magic = 0x%04X\n", magic);
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
 
@@ -83,15 +86,15 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
 					eepdata++;
 				}
 			} else {
-				DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-					"Invalid EEPROM Magic. "
-					"endianness mismatch.\n");
+				ath_print(common, ATH_DBG_FATAL,
+					  "Invalid EEPROM Magic. "
+					  "endianness mismatch.\n");
 				return -EINVAL;
 			}
 		}
 	}
-	DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ?
-		"True" : "False");
+	ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ?
+		  "True" : "False");
 
 	if (need_swap)
 		el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -148,9 +151,9 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
 
 	if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER
 	    || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Bad EEPROM checksum 0x%x or revision 0x%04x\n",
-			 sum, ah->eep_ops->get_eeprom_ver(ah));
+		ath_print(common, ATH_DBG_FATAL,
+			  "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+			  sum, ah->eep_ops->get_eeprom_ver(ah));
 		return -EINVAL;
 	}
 
@@ -436,6 +439,7 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
 						struct ath9k_channel *chan,
 						int16_t *pTxPowerIndexOffset)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct cal_data_per_freq_ar9287 *pRawDataset;
 	struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
 	u8  *pCalBChans = NULL;
@@ -564,24 +568,25 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
 						  & 0xFF) << 24) ;
 					REG_WRITE(ah, regOffset, reg32);
 
-					DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-						"PDADC (%d,%4x): %4.4x %8.8x\n",
-						i, regChainOffset, regOffset,
-						reg32);
-
-					DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-						"PDADC: Chain %d | "
-						"PDADC %3d Value %3d | "
-						"PDADC %3d Value %3d | "
-						"PDADC %3d Value %3d | "
-						"PDADC %3d Value %3d |\n",
-						i, 4 * j, pdadcValues[4 * j],
-						4 * j + 1,
-						pdadcValues[4 * j + 1],
-						4 * j + 2,
-						pdadcValues[4 * j + 2],
-						4 * j + 3,
-						pdadcValues[4 * j + 3]);
+					ath_print(common, ATH_DBG_EEPROM,
+						  "PDADC (%d,%4x): %4.4x "
+						  "%8.8x\n",
+						  i, regChainOffset, regOffset,
+						  reg32);
+
+					ath_print(common, ATH_DBG_EEPROM,
+						  "PDADC: Chain %d | "
+						  "PDADC %3d Value %3d | "
+						  "PDADC %3d Value %3d | "
+						  "PDADC %3d Value %3d | "
+						  "PDADC %3d Value %3d |\n",
+						  i, 4 * j, pdadcValues[4 * j],
+						  4 * j + 1,
+						  pdadcValues[4 * j + 1],
+						  4 * j + 2,
+						  pdadcValues[4 * j + 2],
+						  4 * j + 3,
+						  pdadcValues[4 * j + 3]);
 
 					regOffset += 4;
 				}
@@ -831,6 +836,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
 {
 #define INCREASE_MAXPOW_BY_TWO_CHAIN     6
 #define INCREASE_MAXPOW_BY_THREE_CHAIN   10
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
 	struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
 	struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
@@ -966,8 +972,8 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
 			INCREASE_MAXPOW_BY_THREE_CHAIN;
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"Invalid chainmask configuration\n");
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Invalid chainmask configuration\n");
 		break;
 	}
 }
@@ -1138,19 +1144,20 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
 {
 #define EEP_MAP9287_SPURCHAN \
 	(ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
+	struct ath_common *common = ath9k_hw_common(ah);
 	u16 spur_val = AR_NO_SPUR;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"Getting spur idx %d is2Ghz. %d val %x\n",
-		i, is2GHz, ah->config.spurchans[i][is2GHz]);
+	ath_print(common, ATH_DBG_ANI,
+		  "Getting spur idx %d is2Ghz. %d val %x\n",
+		  i, is2GHz, ah->config.spurchans[i][is2GHz]);
 
 	switch (ah->config.spurmode) {
 	case SPUR_DISABLE:
 		break;
 	case SPUR_ENABLE_IOCTL:
 		spur_val = ah->config.spurchans[i][is2GHz];
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		       "Getting spur val from new loc. %d\n", spur_val);
+		ath_print(common, ATH_DBG_ANI,
+			  "Getting spur val from new loc. %d\n", spur_val);
 		break;
 	case SPUR_ENABLE_EEPROM:
 		spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 4071fc91da0a..404a0341242c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 static void ath9k_get_txgain_index(struct ath_hw *ah,
 		struct ath9k_channel *chan,
@@ -89,14 +89,15 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
 static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
 {
 #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+	struct ath_common *common = ath9k_hw_common(ah);
 	u16 *eep_data = (u16 *)&ah->eeprom.def;
 	int addr, ar5416_eep_start_loc = 0x100;
 
 	for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
-		if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
+		if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc,
 					 eep_data)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Unable to read eeprom region\n");
+			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+				  "Unable to read eeprom region\n");
 			return false;
 		}
 		eep_data++;
@@ -109,19 +110,20 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 {
 	struct ar5416_eeprom_def *eep =
 		(struct ar5416_eeprom_def *) &ah->eeprom.def;
+	struct ath_common *common = ath9k_hw_common(ah);
 	u16 *eepdata, temp, magic, magic2;
 	u32 sum = 0, el;
 	bool need_swap = false;
 	int i, addr, size;
 
-	if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n");
+	if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+		ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n");
 		return false;
 	}
 
 	if (!ath9k_hw_use_flash(ah)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"Read Magic = 0x%04X\n", magic);
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Read Magic = 0x%04X\n", magic);
 
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
@@ -137,16 +139,16 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 					eepdata++;
 				}
 			} else {
-				DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-					"Invalid EEPROM Magic. "
-					"Endianness mismatch.\n");
+				ath_print(common, ATH_DBG_FATAL,
+					  "Invalid EEPROM Magic. "
+					  "Endianness mismatch.\n");
 				return -EINVAL;
 			}
 		}
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
-		need_swap ? "True" : "False");
+	ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+		  need_swap ? "True" : "False");
 
 	if (need_swap)
 		el = swab16(ah->eeprom.def.baseEepHeader.length);
@@ -167,8 +169,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 		u32 integer, j;
 		u16 word;
 
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"EEPROM Endianness is not native.. Changing.\n");
+		ath_print(common, ATH_DBG_EEPROM,
+			  "EEPROM Endianness is not native.. Changing.\n");
 
 		word = swab16(eep->baseEepHeader.length);
 		eep->baseEepHeader.length = word;
@@ -214,8 +216,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 
 	if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
 	    ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+		ath_print(common, ATH_DBG_FATAL,
+			  "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
 			sum, ah->eep_ops->get_eeprom_ver(ah));
 		return -EINVAL;
 	}
@@ -289,6 +291,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
 			return pBase->frac_n_5g;
 		else
 			return 0;
+	case EEP_PWR_TABLE_OFFSET:
+		if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21)
+			return pBase->pwr_table_offset;
+		else
+			return AR5416_PWR_TABLE_OFFSET_DB;
 	default:
 		return 0;
 	}
@@ -739,6 +746,76 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
 	return;
 }
 
+static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
+				u16 *gb,
+				u16 numXpdGain,
+				u16 pdGainOverlap_t2,
+				int8_t pwr_table_offset,
+				int16_t *diff)
+{
+	u16 k;
+
+	/* Prior to writing the boundaries or the pdadc vs. power table
+	 * into the chip registers the default starting point on the pdadc
+	 * vs. power table needs to be checked and the curve boundaries
+	 * adjusted accordingly
+	 */
+	if (AR_SREV_9280_20_OR_LATER(ah)) {
+		u16 gb_limit;
+
+		if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
+			/* get the difference in dB */
+			*diff = (u16)(pwr_table_offset - AR5416_PWR_TABLE_OFFSET_DB);
+			/* get the number of half dB steps */
+			*diff *= 2;
+			/* change the original gain boundary settings
+			 * by the number of half dB steps
+			 */
+			for (k = 0; k < numXpdGain; k++)
+				gb[k] = (u16)(gb[k] - *diff);
+		}
+		/* Because of a hardware limitation, ensure the gain boundary
+		 * is not larger than (63 - overlap)
+		 */
+		gb_limit = (u16)(AR5416_MAX_RATE_POWER - pdGainOverlap_t2);
+
+		for (k = 0; k < numXpdGain; k++)
+			gb[k] = (u16)min(gb_limit, gb[k]);
+	}
+
+	return *diff;
+}
+
+static void ath9k_adjust_pdadc_values(struct ath_hw *ah,
+				      int8_t pwr_table_offset,
+				      int16_t diff,
+				      u8 *pdadcValues)
+{
+#define NUM_PDADC(diff) (AR5416_NUM_PDADC_VALUES - diff)
+	u16 k;
+
+	/* If this is a board that has a pwrTableOffset that differs from
+	 * the default AR5416_PWR_TABLE_OFFSET_DB then the start of the
+	 * pdadc vs pwr table needs to be adjusted prior to writing to the
+	 * chip.
+	 */
+	if (AR_SREV_9280_20_OR_LATER(ah)) {
+		if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
+			/* shift the table to start at the new offset */
+			for (k = 0; k < (u16)NUM_PDADC(diff); k++) {
+				pdadcValues[k] = pdadcValues[k + diff];
+			}
+
+			/* fill the back of the table */
+			for (k = (u16)NUM_PDADC(diff); k < NUM_PDADC(0); k++) {
+				pdadcValues[k] = pdadcValues[NUM_PDADC(diff)];
+			}
+		}
+	}
+#undef NUM_PDADC
+}
+
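The two helpers added above carry out the EEP_PWR_TABLE_OFFSET handling introduced earlier in this file: when the EEPROM reports a power-table offset different from AR5416_PWR_TABLE_OFFSET_DB, the gain boundaries are shifted by the difference expressed in half-dB steps (and clamped to the 63 - overlap hardware limit), and the PDADC curve is shifted by the same number of entries. A standalone arithmetic sketch of that adjustment, using invented sample values rather than real calibration data:

/* Arithmetic sketch of the offset adjustment done by
 * ath9k_change_gain_boundary_setting()/ath9k_adjust_pdadc_values().
 * All numbers below are invented sample data, not real calibration values. */
#include <stdio.h>

#define DEFAULT_OFFSET_DB   (-5)   /* stand-in for AR5416_PWR_TABLE_OFFSET_DB */
#define MAX_RATE_POWER      63     /* stand-in for AR5416_MAX_RATE_POWER */
#define NUM_PDADC_VALUES    128    /* stand-in for AR5416_NUM_PDADC_VALUES */

int main(void)
{
	int pwr_table_offset = -2;           /* hypothetical EEPROM value */
	int pd_gain_overlap = 6;             /* hypothetical overlap, in half dB */
	int gb[2] = { 50, 60 };              /* hypothetical gain boundaries */
	unsigned char pdadc[NUM_PDADC_VALUES];
	int diff, k;

	for (k = 0; k < NUM_PDADC_VALUES; k++)
		pdadc[k] = (unsigned char)k;  /* fake monotonic PDADC curve */

	/* dB difference from the default, expressed in half-dB steps */
	diff = (pwr_table_offset - DEFAULT_OFFSET_DB) * 2;   /* (-2 - -5) * 2 = 6 */

	for (k = 0; k < 2; k++) {
		gb[k] -= diff;                                /* shift boundaries */
		if (gb[k] > MAX_RATE_POWER - pd_gain_overlap)
			gb[k] = MAX_RATE_POWER - pd_gain_overlap;  /* clamp */
	}

	/* shift the curve left by 'diff' entries and fill the back of the
	 * table, mirroring ath9k_adjust_pdadc_values() */
	for (k = 0; k < NUM_PDADC_VALUES - diff; k++)
		pdadc[k] = pdadc[k + diff];
	for (k = NUM_PDADC_VALUES - diff; k < NUM_PDADC_VALUES; k++)
		pdadc[k] = pdadc[NUM_PDADC_VALUES - diff];

	printf("diff=%d gb[0]=%d gb[1]=%d pdadc[0]=%d pdadc[127]=%d\n",
	       diff, gb[0], gb[1], pdadc[0], pdadc[127]);
	return 0;
}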
 static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 				  struct ath9k_channel *chan,
 				  int16_t *pTxPowerIndexOffset)
@@ -746,7 +823,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 #define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x)
 #define SM_PDGAIN_B(x, y) \
 		SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y)
-
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
 	struct cal_data_per_freq *pRawDataset;
 	u8 *pCalBChans = NULL;
@@ -754,15 +831,18 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 	static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
 	u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
 	u16 numPiers, i, j;
-	int16_t tMinCalPower;
+	int16_t tMinCalPower, diff = 0;
 	u16 numXpdGain, xpdMask;
 	u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
 	u32 reg32, regOffset, regChainOffset;
 	int16_t modalIdx;
+	int8_t pwr_table_offset;
 
 	modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
 	xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
 
+	pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET);
+
 	if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
 	    AR5416_EEP_MINOR_VER_2) {
 		pdGainOverlap_t2 =
@@ -842,6 +922,13 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 							numXpdGain);
 			}
 
+			diff = ath9k_change_gain_boundary_setting(ah,
+							   gainBoundaries,
+							   numXpdGain,
+							   pdGainOverlap_t2,
+							   pwr_table_offset,
+							   &diff);
+
 			if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
 				if (OLC_FOR_AR9280_20_LATER) {
 					REG_WRITE(ah,
@@ -862,6 +949,10 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 				}
 			}
 
+
+			ath9k_adjust_pdadc_values(ah, pwr_table_offset,
+						  diff, pdadcValues);
+
 			regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
 			for (j = 0; j < 32; j++) {
 				reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
@@ -870,20 +961,20 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 					((pdadcValues[4 * j + 3] & 0xFF) << 24);
 				REG_WRITE(ah, regOffset, reg32);
 
-				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-					"PDADC (%d,%4x): %4.4x %8.8x\n",
-					i, regChainOffset, regOffset,
-					reg32);
-				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-					"PDADC: Chain %d | PDADC %3d "
-					"Value %3d | PDADC %3d Value %3d | "
-					"PDADC %3d Value %3d | PDADC %3d "
-					"Value %3d |\n",
-					i, 4 * j, pdadcValues[4 * j],
-					4 * j + 1, pdadcValues[4 * j + 1],
-					4 * j + 2, pdadcValues[4 * j + 2],
-					4 * j + 3,
-					pdadcValues[4 * j + 3]);
+				ath_print(common, ATH_DBG_EEPROM,
+					  "PDADC (%d,%4x): %4.4x %8.8x\n",
+					  i, regChainOffset, regOffset,
+					  reg32);
+				ath_print(common, ATH_DBG_EEPROM,
+					  "PDADC: Chain %d | PDADC %3d "
+					  "Value %3d | PDADC %3d Value %3d | "
+					  "PDADC %3d Value %3d | PDADC %3d "
+					  "Value %3d |\n",
+					  i, 4 * j, pdadcValues[4 * j],
+					  4 * j + 1, pdadcValues[4 * j + 1],
+					  4 * j + 2, pdadcValues[4 * j + 2],
+					  4 * j + 3,
+					  pdadcValues[4 * j + 3]);
 
 				regOffset += 4;
 			}
@@ -1197,8 +1288,13 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 	}
 
 	if (AR_SREV_9280_10_OR_LATER(ah)) {
-		for (i = 0; i < Ar5416RateSize; i++)
-			ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
+		for (i = 0; i < Ar5416RateSize; i++) {
+			int8_t pwr_table_offset;
+
+			pwr_table_offset = ah->eep_ops->get_eeprom(ah,
+							EEP_PWR_TABLE_OFFSET);
+			ratesArray[i] -= pwr_table_offset * 2;
+		}
 	}
 
 	REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
@@ -1297,7 +1393,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 
 	if (AR_SREV_9280_10_OR_LATER(ah))
 		regulatory->max_power_level =
-			ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
+			ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
 	else
 		regulatory->max_power_level = ratesArray[i];
 
@@ -1311,8 +1407,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 		regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-			"Invalid chainmask configuration\n");
+		ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+			  "Invalid chainmask configuration\n");
 		break;
 	}
 }
@@ -1349,20 +1445,21 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
 {
 #define EEP_DEF_SPURCHAN \
 	(ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	u16 spur_val = AR_NO_SPUR;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-		"Getting spur idx %d is2Ghz. %d val %x\n",
-		i, is2GHz, ah->config.spurchans[i][is2GHz]);
+	ath_print(common, ATH_DBG_ANI,
+		  "Getting spur idx %d is2Ghz. %d val %x\n",
+		  i, is2GHz, ah->config.spurchans[i][is2GHz]);
 
 	switch (ah->config.spurmode) {
 	case SPUR_DISABLE:
 		break;
 	case SPUR_ENABLE_IOCTL:
 		spur_val = ah->config.spurchans[i][is2GHz];
-		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
-			"Getting spur val from new loc. %d\n", spur_val);
+		ath_print(common, ATH_DBG_ANI,
+			  "Getting spur val from new loc. %d\n", spur_val);
 		break;
 	case SPUR_ENABLE_EEPROM:
 		spur_val = EEP_DEF_SPURCHAN;
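Earlier in this file's diff, ath9k_hw_def_check_eeprom() decides whether the EEPROM image needs byte-swapping by comparing the magic word with its swab16() counterpart before walking the data. A minimal standalone illustration of that decision; the 0xa55a magic used here is only a sample value, not a quote of AR5416_EEPROM_MAGIC:

/* Sketch of the endianness check in ath9k_hw_def_check_eeprom():
 * if the magic word only matches after swab16(), the whole image
 * is treated as byte-swapped. 0xa55a is used as a sample magic. */
#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

static int eeprom_needs_swap(uint16_t magic_read, uint16_t magic_expected)
{
	if (magic_read == magic_expected)
		return 0;			/* native byte order */
	if (swab16(magic_read) == magic_expected)
		return 1;			/* image is byte-swapped */
	return -1;				/* invalid magic */
}

int main(void)
{
	printf("%d\n", eeprom_needs_swap(0xa55a, 0xa55a));  /* 0 */
	printf("%d\n", eeprom_needs_swap(0x5aa5, 0xa55a));  /* 1 */
	printf("%d\n", eeprom_needs_swap(0x1234, 0xa55a));  /* -1 */
	return 0;
}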
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ca7694caf364..2ec61f08cfdb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -16,9 +16,9 @@
 
 #include <linux/io.h>
 #include <asm/unaligned.h>
-#include <linux/pci.h>
 
-#include "ath9k.h"
+#include "hw.h"
+#include "rc.h"
 #include "initvals.h"
 
 #define ATH9K_CLOCK_RATE_CCK		22
@@ -26,13 +26,27 @@
 #define ATH9K_CLOCK_RATE_2GHZ_OFDM	44
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
-			      enum ath9k_ht_macmode macmode);
+static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
 static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
 			      struct ar5416_eeprom_def *pEepData,
 			      u32 reg, u32 value);
-static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int __init ath9k_init(void)
+{
+	return 0;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+	return;
+}
+module_exit(ath9k_exit);
 
 /********************/
 /* Helper Functions */
@@ -40,7 +54,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan
 
 static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 
 	if (!ah->curchan) /* should really check for CCK instead */
 		return clks / ATH9K_CLOCK_RATE_CCK;
@@ -52,7 +66,7 @@ static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
 
 static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 
 	if (conf_is_ht40(conf))
 		return ath9k_hw_mac_usec(ah, clks) / 2;
@@ -62,7 +76,7 @@ static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
 
 static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 
 	if (!ah->curchan) /* should really check for CCK instead */
 		return usecs *ATH9K_CLOCK_RATE_CCK;
@@ -73,7 +87,7 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
 
 static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
 {
-	struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 
 	if (conf_is_ht40(conf))
 		return ath9k_hw_mac_clks(ah, usecs) * 2;
@@ -81,38 +95,6 @@ static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
 		return ath9k_hw_mac_clks(ah, usecs);
 }
 
-/*
- * Read and write, they both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only accept sanely 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val)
-{
-	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
-		unsigned long flags;
-		spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
-		iowrite32(val, ah->ah_sc->mem + reg_offset);
-		spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
-	} else
-		iowrite32(val, ah->ah_sc->mem + reg_offset);
-}
-
-unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset)
-{
-	u32 val;
-	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
-		unsigned long flags;
-		spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
-		val = ioread32(ah->ah_sc->mem + reg_offset);
-		spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
-	} else
-		val = ioread32(ah->ah_sc->mem + reg_offset);
-	return val;
-}
-
 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
 {
 	int i;
@@ -126,12 +108,13 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
 		udelay(AH_TIME_QUANTUM);
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_ANY,
-		"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
-		timeout, reg, REG_READ(ah, reg), mask, val);
+	ath_print(ath9k_hw_common(ah), ATH_DBG_ANY,
+		  "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
+		  timeout, reg, REG_READ(ah, reg), mask, val);
 
 	return false;
 }
+EXPORT_SYMBOL(ath9k_hw_wait);
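ath9k_hw_wait(), now exported above, polls a masked register until it matches the expected value or a timeout budget of quantum-sized delays runs out, then emits the timeout message shown in this hunk. A standalone sketch of that polling shape with a simulated register; the quantum and timeout values are placeholders, not AH_TIME_QUANTUM/AH_WAIT_TIMEOUT:

/* Sketch of a masked-register poll in the spirit of ath9k_hw_wait().
 * The quantum/timeout values and the simulated register are placeholders. */
#include <stdio.h>

#define TIME_QUANTUM_US 10     /* stand-in for AH_TIME_QUANTUM */
#define WAIT_TIMEOUT    1000   /* stand-in for AH_WAIT_TIMEOUT (iterations) */

static unsigned int fake_reg_state;

static unsigned int reg_read(void)
{
	/* pretend the hardware flips the ready bit after a few reads */
	if (fake_reg_state < 5)
		fake_reg_state++;
	return fake_reg_state == 5 ? 0x1 : 0x0;
}

static int wait_for_bits(unsigned int mask, unsigned int val)
{
	int i;

	for (i = 0; i < WAIT_TIMEOUT; i++) {
		if ((reg_read() & mask) == val)
			return 1;
		/* the driver would udelay(TIME_QUANTUM_US) here */
	}
	printf("timeout (%d us) waiting for 0x%x & 0x%x == 0x%x\n",
	       WAIT_TIMEOUT * TIME_QUANTUM_US, reg_read(), mask, val);
	return 0;
}

int main(void)
{
	printf("ready=%d\n", wait_for_bits(0x1, 0x1));
	return 0;
}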
 
 u32 ath9k_hw_reverse_bits(u32 val, u32 n)
 {
@@ -165,22 +148,19 @@ bool ath9k_get_channel_edges(struct ath_hw *ah,
 }
 
 u16 ath9k_hw_computetxtime(struct ath_hw *ah,
-			   const struct ath_rate_table *rates,
+			   u8 phy, int kbps,
 			   u32 frameLen, u16 rateix,
 			   bool shortPreamble)
 {
 	u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
-	u32 kbps;
-
-	kbps = rates->info[rateix].ratekbps;
 
 	if (kbps == 0)
 		return 0;
 
-	switch (rates->info[rateix].phy) {
+	switch (phy) {
 	case WLAN_RC_PHY_CCK:
 		phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
-		if (shortPreamble && rates->info[rateix].short_preamble)
+		if (shortPreamble)
 			phyTime >>= 1;
 		numBits = frameLen << 3;
 		txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps);
@@ -210,15 +190,15 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah,
 		}
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Unknown phy %u (rate ix %u)\n",
-			rates->info[rateix].phy, rateix);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "Unknown phy %u (rate ix %u)\n", phy, rateix);
 		txTime = 0;
 		break;
 	}
 
 	return txTime;
 }
+EXPORT_SYMBOL(ath9k_hw_computetxtime);
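ath9k_hw_computetxtime() now takes the PHY type and rate in kbps directly instead of dereferencing a rate table. For the CCK branch shown above, the airtime is SIFS plus the preamble/PLCP time plus the payload bits scaled by the rate. A standalone sketch with typical 802.11b long-preamble constants, which are assumptions here rather than values quoted from ath9k:

/* Sketch of the CCK branch of ath9k_hw_computetxtime(). The constants are
 * the usual 802.11b long-preamble values and are assumptions here, not
 * definitions quoted from ath9k. */
#include <stdio.h>

#define CCK_SIFS_TIME_US   10
#define CCK_PREAMBLE_BITS  144
#define CCK_PLCP_BITS      48

static unsigned int cck_tx_time_us(unsigned int frame_len_bytes,
				   unsigned int kbps, int short_preamble)
{
	unsigned int phy_time = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
	unsigned int num_bits = frame_len_bytes * 8;

	if (short_preamble)
		phy_time >>= 1;	/* short preamble halves the PHY header time */

	return CCK_SIFS_TIME_US + phy_time + (num_bits * 1000) / kbps;
}

int main(void)
{
	/* hypothetical 1500-byte frame at 11 Mb/s, long preamble:
	 * 10 + 192 + 12000000/11000 = 10 + 192 + 1090 = 1292 us */
	printf("%u us\n", cck_tx_time_us(1500, 11000, 0));
	return 0;
}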
 
 void ath9k_hw_get_channel_centers(struct ath_hw *ah,
 				  struct ath9k_channel *chan,
@@ -245,10 +225,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
 
 	centers->ctl_center =
 		centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
+	/* 25 MHz spacing is supported by the hw but not by the upper layers */
 	centers->ext_center =
-		centers->synth_center + (extoff *
-			 ((ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
-			  HT40_CHANNEL_CENTER_SHIFT : 15));
+		centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
 }
 
 /******************/
@@ -317,6 +296,7 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
 
 static bool ath9k_hw_chip_test(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
 	u32 regHold[2];
 	u32 patternData[4] = { 0x55555555,
@@ -335,10 +315,11 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
 			REG_WRITE(ah, addr, wrData);
 			rdData = REG_READ(ah, addr);
 			if (rdData != wrData) {
-				DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-					"address test failed "
-					"addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
-					addr, wrData, rdData);
+				ath_print(common, ATH_DBG_FATAL,
+					  "address test failed "
+					  "addr: 0x%08x - wr:0x%08x != "
+					  "rd:0x%08x\n",
+					  addr, wrData, rdData);
 				return false;
 			}
 		}
@@ -347,10 +328,11 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
 			REG_WRITE(ah, addr, wrData);
 			rdData = REG_READ(ah, addr);
 			if (wrData != rdData) {
-				DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-					"address test failed "
-					"addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
-					addr, wrData, rdData);
+				ath_print(common, ATH_DBG_FATAL,
+					  "address test failed "
+					  "addr: 0x%08x - wr:0x%08x != "
+					  "rd:0x%08x\n",
+					  addr, wrData, rdData);
 				return false;
 			}
 		}
@@ -404,8 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
 	ah->config.cck_trig_high = 200;
 	ah->config.cck_trig_low = 100;
 	ah->config.enable_ani = 1;
-	ah->config.diversity_control = ATH9K_ANT_VARIABLE;
-	ah->config.antenna_switch_swap = 0;
 
 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
 		ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -433,6 +413,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
 	if (num_possible_cpus() > 1)
 		ah->config.serialize_regmode = SER_REG_MODE_AUTO;
 }
+EXPORT_SYMBOL(ath9k_hw_init);
 
 static void ath9k_hw_init_defaults(struct ath_hw *ah)
 {
@@ -459,27 +440,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
 	ah->acktimeout = (u32) -1;
 	ah->ctstimeout = (u32) -1;
 	ah->globaltxtimeout = (u32) -1;
-
-	ah->gbeacon_rate = 0;
-
 	ah->power_mode = ATH9K_PM_UNDEFINED;
 }
 
-static int ath9k_hw_rfattach(struct ath_hw *ah)
-{
-	bool rfStatus = false;
-	int ecode = 0;
-
-	rfStatus = ath9k_hw_init_rf(ah, &ecode);
-	if (!rfStatus) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"RF setup failed, status: %u\n", ecode);
-		return ecode;
-	}
-
-	return 0;
-}
-
 static int ath9k_hw_rf_claim(struct ath_hw *ah)
 {
 	u32 val;
@@ -497,9 +460,9 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
 	case AR_RAD2122_SREV_MAJOR:
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Radio Chip Rev 0x%02X not supported\n",
-			val & AR_RADIO_SREV_MAJOR);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "Radio Chip Rev 0x%02X not supported\n",
+			  val & AR_RADIO_SREV_MAJOR);
 		return -EOPNOTSUPP;
 	}
 
@@ -510,6 +473,7 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
 
 static int ath9k_hw_init_macaddr(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 sum;
 	int i;
 	u16 eeval;
@@ -518,8 +482,8 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
 	for (i = 0; i < 3; i++) {
 		eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
 		sum += eeval;
-		ah->macaddr[2 * i] = eeval >> 8;
-		ah->macaddr[2 * i + 1] = eeval & 0xff;
+		common->macaddr[2 * i] = eeval >> 8;
+		common->macaddr[2 * i + 1] = eeval & 0xff;
 	}
 	if (sum == 0 || sum == 0xffff * 3)
 		return -EADDRNOTAVAIL;
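The hunk above now stores the MAC address in the shared struct ath_common: each of the three EEPROM words contributes two bytes, high byte first, and an address that sums to all zeros or all ones is rejected. A standalone sketch of that unpacking with an invented sample address:

/* Sketch of the MAC unpacking in ath9k_hw_init_macaddr(); the EEPROM
 * words below are an invented example address, not real calibration data. */
#include <stdio.h>

int main(void)
{
	/* three 16-bit EEPROM words, high byte first within each word */
	unsigned short eeval[3] = { 0x0003, 0x7f11, 0x2233 };
	unsigned char macaddr[6];
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 3; i++) {
		sum += eeval[i];
		macaddr[2 * i] = eeval[i] >> 8;        /* high byte */
		macaddr[2 * i + 1] = eeval[i] & 0xff;  /* low byte */
	}

	if (sum == 0 || sum == 0xffff * 3) {
		printf("invalid (blank or erased) EEPROM address\n");
		return 1;
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       macaddr[0], macaddr[1], macaddr[2],
	       macaddr[3], macaddr[4], macaddr[5]);
	return 0;
}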
@@ -590,12 +554,20 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
 	if (ecode != 0)
 		return ecode;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n",
-		ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah));
-
-	ecode = ath9k_hw_rfattach(ah);
-	if (ecode != 0)
-		return ecode;
+	ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG,
+		  "Eeprom VER: %d, REV: %d\n",
+		  ah->eep_ops->get_eeprom_ver(ah),
+		  ah->eep_ops->get_eeprom_rev(ah));
+
+	if (!AR_SREV_9280_10_OR_LATER(ah)) {
+		ecode = ath9k_hw_rf_alloc_ext_banks(ah);
+		if (ecode) {
+			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+				  "Failed allocating banks for "
+				  "external radio\n");
+			return ecode;
+		}
+	}
 
 	if (!AR_SREV_9100(ah)) {
 		ath9k_hw_ani_setup(ah);
@@ -617,6 +589,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
 	case AR9285_DEVID_PCIE:
 	case AR5416_DEVID_AR9287_PCI:
 	case AR5416_DEVID_AR9287_PCIE:
+	case AR9271_USB:
 		return true;
 	default:
 		break;
@@ -634,9 +607,8 @@ static bool ath9k_hw_macversion_supported(u32 macversion)
 	case AR_SREV_VERSION_9280:
 	case AR_SREV_VERSION_9285:
 	case AR_SREV_VERSION_9287:
-		return true;
-	/* Not yet */
 	case AR_SREV_VERSION_9271:
+		return true;
 	default:
 		break;
 	}
@@ -670,10 +642,13 @@ static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
 static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
 {
 	if (AR_SREV_9271(ah)) {
-		INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271_1_0,
-			       ARRAY_SIZE(ar9271Modes_9271_1_0), 6);
-		INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271_1_0,
-			       ARRAY_SIZE(ar9271Common_9271_1_0), 2);
+		INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
+			       ARRAY_SIZE(ar9271Modes_9271), 6);
+		INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
+			       ARRAY_SIZE(ar9271Common_9271), 2);
+		INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
+			       ar9271Modes_9271_1_0_only,
+			       ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
 		return;
 	}
 
@@ -905,21 +880,27 @@ static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
 
 int ath9k_hw_init(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int r = 0;
 
-	if (!ath9k_hw_devid_supported(ah->hw_version.devid))
+	if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unsupported device ID: 0x%0x\n",
+			  ah->hw_version.devid);
 		return -EOPNOTSUPP;
+	}
 
 	ath9k_hw_init_defaults(ah);
 	ath9k_hw_init_config(ah);
 
 	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Couldn't reset chip\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Couldn't reset chip\n");
 		return -EIO;
 	}
 
 	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
+		ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
 		return -EIO;
 	}
 
@@ -934,14 +915,19 @@ int ath9k_hw_init(struct ath_hw *ah)
 		}
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_RESET, "serialize_regmode is %d\n",
+	ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
 		ah->config.serialize_regmode);
 
+	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
+	else
+		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
+
 	if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Mac Chip Rev 0x%02x.%x is not supported by "
-			"this driver\n", ah->hw_version.macVersion,
-			ah->hw_version.macRev);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Mac Chip Rev 0x%02x.%x is not supported by "
+			  "this driver\n", ah->hw_version.macVersion,
+			  ah->hw_version.macRev);
 		return -EOPNOTSUPP;
 	}
 
@@ -959,8 +945,14 @@ int ath9k_hw_init(struct ath_hw *ah)
 	ath9k_hw_init_cal_settings(ah);
 
 	ah->ani_function = ATH9K_ANI_ALL;
-	if (AR_SREV_9280_10_OR_LATER(ah))
+	if (AR_SREV_9280_10_OR_LATER(ah)) {
 		ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
+		ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
+			  "keycache entry %u out of range\n", entry);
+	} else {
+		ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
+		ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
+	}
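The assignments above replace scattered AR_SREV_9280_10_OR_LATER() checks in the reset and channel-change paths with per-chip callbacks bound once at init time (ath9k_hw_rf_set_freq and ath9k_hw_spur_mitigate_freq). A minimal sketch of that dispatch pattern; the types, names and revision flag are placeholders, not the real ath9k declarations:

/* Sketch of init-time callback binding, in the spirit of the
 * ah->ath9k_hw_rf_set_freq / ah->ath9k_hw_spur_mitigate_freq assignments.
 * Types, names and the revision check are placeholders. */
#include <stdio.h>

struct demo_chan { int mhz; };

struct demo_hw {
	int is_ar9280_or_later;	/* placeholder revision flag */
	int (*rf_set_freq)(struct demo_hw *, struct demo_chan *);
};

static int old_rf_set_freq(struct demo_hw *hw, struct demo_chan *c)
{
	printf("pre-AR9280 synth programming for %d MHz\n", c->mhz);
	return 0;
}

static int new_rf_set_freq(struct demo_hw *hw, struct demo_chan *c)
{
	printf("AR9280+ synth programming for %d MHz\n", c->mhz);
	return 0;
}

static void demo_hw_init(struct demo_hw *hw)
{
	/* bind once here so callers never repeat the revision check */
	hw->rf_set_freq = hw->is_ar9280_or_later ? new_rf_set_freq
						 : old_rf_set_freq;
}

int main(void)
{
	struct demo_hw hw = { .is_ar9280_or_later = 1 };
	struct demo_chan chan = { .mhz = 2412 };

	demo_hw_init(&hw);
	return hw.rf_set_freq(&hw, &chan);
}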
 
 	ath9k_hw_init_mode_regs(ah);
 
@@ -969,18 +961,31 @@ int ath9k_hw_init(struct ath_hw *ah)
 	else
 		ath9k_hw_disablepcie(ah);
 
+	/* Support for Japan ch.14 (2484) spread */
+	if (AR_SREV_9287_11_OR_LATER(ah)) {
+		INIT_INI_ARRAY(&ah->iniCckfirNormal,
+		       ar9287Common_normal_cck_fir_coeff_92871_1,
+		       ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+		       ar9287Common_japan_2484_cck_fir_coeff_92871_1,
+		       ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
+	}
+
 	r = ath9k_hw_post_init(ah);
 	if (r)
 		return r;
 
 	ath9k_hw_init_mode_gain_regs(ah);
-	ath9k_hw_fill_cap_info(ah);
+	r = ath9k_hw_fill_cap_info(ah);
+	if (r)
+		return r;
+
 	ath9k_hw_init_11a_eeprom_fix(ah);
 
 	r = ath9k_hw_init_macaddr(ah);
 	if (r) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Failed to initialize MAC address\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Failed to initialize MAC address\n");
 		return r;
 	}
 
@@ -991,6 +996,8 @@ int ath9k_hw_init(struct ath_hw *ah)
 
 	ath9k_init_nfcal_hist_buffer(ah);
 
+	common->state = ATH_HW_INITIALIZED;
+
 	return 0;
 }
 
@@ -1027,6 +1034,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
 	REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
 }
 
+static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
+{
+	u32 lcr;
+	u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
+
+	lcr = REG_READ(ah, 0x5100c);
+	lcr |= 0x80;
+
+	REG_WRITE(ah, 0x5100c, lcr);
+	REG_WRITE(ah, 0x51004, (baud_divider >> 8));
+	REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
+
+	lcr &= ~0x80;
+	REG_WRITE(ah, 0x5100c, lcr);
+}
+
 static void ath9k_hw_init_pll(struct ath_hw *ah,
 			      struct ath9k_channel *chan)
 {
@@ -1090,6 +1113,26 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 	}
 	REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
 
+	/* Switch the core clock for AR9271 to 117 MHz */
+	if (AR_SREV_9271(ah)) {
+		if ((pll == 0x142c) || (pll == 0x2850)) {
+			udelay(500);
+			/* set CLKOBS to output AHB clock */
+			REG_WRITE(ah, 0x7020, 0xe);
+			/*
+			 * 0x304: 117 MHz, ahb_ratio: 1x1
+			 * 0x306: 40 MHz, ahb_ratio: 1x1
+			 */
+			REG_WRITE(ah, 0x50040, 0x304);
+			/*
+			 * Adjust the baud divisor to keep the targeted
+			 * baud rate based on the core clock in use.
+			 */
+			ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
+						    AR9271_TARGET_BAUD_RATE);
+		}
+	}
+
 	udelay(RTC_PLL_SETTLE_DELAY);
 
 	REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
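ath9k_hw_change_target_baud(), added earlier in this file, recomputes the UART divisor after the AR9271 core clock switch as freq_in_MHz * 1,000,000 / 16 / baud and writes the high and low bytes to separate registers. A quick standalone check of that arithmetic; 117 MHz comes from the comment above, while 19200 baud is only an example since AR9271_TARGET_BAUD_RATE is not shown in this hunk:

/* Arithmetic sketch of the divisor computed by ath9k_hw_change_target_baud().
 * 19200 baud is an example value, not necessarily AR9271_TARGET_BAUD_RATE. */
#include <stdio.h>

int main(void)
{
	unsigned int freq_mhz = 117;   /* AR9271 core clock after the switch */
	unsigned int baud = 19200;     /* example target baud rate */
	unsigned int divider = freq_mhz * 1000 * 1000 / 16 / baud;

	/* 117000000 / 16 / 19200 = 380 (integer division) */
	printf("divider=%u high=0x%02x low=0x%02x\n",
	       divider, (divider >> 8) & 0xff, divider & 0xff);
	return 0;
}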
@@ -1107,7 +1150,7 @@ static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
 		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
 			    AR_PHY_SWAP_ALT_CHAIN);
 	case 0x3:
-		if (((ah)->hw_version.macVersion <= AR_SREV_VERSION_9160)) {
+		if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
 			REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
 			REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
 			break;
@@ -1164,7 +1207,8 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
 static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
 {
 	if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
-		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad ack timeout %u\n", us);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
+			  "bad ack timeout %u\n", us);
 		ah->acktimeout = (u32) -1;
 		return false;
 	} else {
@@ -1178,7 +1222,8 @@ static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
 static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
 {
 	if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
-		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad cts timeout %u\n", us);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
+			  "bad cts timeout %u\n", us);
 		ah->ctstimeout = (u32) -1;
 		return false;
 	} else {
@@ -1192,8 +1237,8 @@ static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
 static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
 {
 	if (tu > 0xFFFF) {
-		DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
-			"bad global tx timeout %u\n", tu);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+			  "bad global tx timeout %u\n", tu);
 		ah->globaltxtimeout = (u32) -1;
 		return false;
 	} else {
@@ -1205,8 +1250,8 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
 
 static void ath9k_hw_init_user_settings(struct ath_hw *ah)
 {
-	DPRINTF(ah->ah_sc, ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
-		ah->misc_mode);
+	ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
+		  ah->misc_mode);
 
 	if (ah->misc_mode != 0)
 		REG_WRITE(ah, AR_PCU_MISC,
@@ -1229,14 +1274,23 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid)
 
 void ath9k_hw_detach(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (common->state <= ATH_HW_INITIALIZED)
+		goto free_hw;
+
 	if (!AR_SREV_9100(ah))
 		ath9k_hw_ani_disable(ah);
 
-	ath9k_hw_rf_free(ah);
 	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
+
+free_hw:
+	if (!AR_SREV_9280_10_OR_LATER(ah))
+		ath9k_hw_rf_free_ext_banks(ah);
 	kfree(ah);
 	ah = NULL;
 }
+EXPORT_SYMBOL(ath9k_hw_detach);
 
 /*******/
 /* INI */
@@ -1254,7 +1308,8 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
 		 * AR9271 1.1
 		 */
 		if (AR_SREV_9271_10(ah)) {
-			val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) | AR_PHY_SPECTRAL_SCAN_ENABLE;
+			val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
+			      AR_PHY_SPECTRAL_SCAN_ENABLE;
 			REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
 		}
 		else if (AR_SREV_9271_11(ah))
@@ -1298,28 +1353,29 @@ static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
 			      u32 reg, u32 value)
 {
 	struct base_eep_header *pBase = &(pEepData->baseEepHeader);
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	switch (ah->hw_version.devid) {
 	case AR9280_DEVID_PCI:
 		if (reg == 0x7894) {
-			DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+			ath_print(common, ATH_DBG_EEPROM,
 				"ini VAL: %x  EEPROM: %x\n", value,
 				(pBase->version & 0xff));
 
 			if ((pBase->version & 0xff) > 0x0a) {
-				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-					"PWDCLKIND: %d\n",
-					pBase->pwdclkind);
+				ath_print(common, ATH_DBG_EEPROM,
+					  "PWDCLKIND: %d\n",
+					  pBase->pwdclkind);
 				value &= ~AR_AN_TOP2_PWDCLKIND;
 				value |= AR_AN_TOP2_PWDCLKIND &
 					(pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
 			} else {
-				DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-					"PWDCLKIND Earlier Rev\n");
+				ath_print(common, ATH_DBG_EEPROM,
+					  "PWDCLKIND Earlier Rev\n");
 			}
 
-			DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
-				"final ini VAL: %x\n", value);
+			ath_print(common, ATH_DBG_EEPROM,
+				  "final ini VAL: %x\n", value);
 		}
 		break;
 	}
@@ -1374,8 +1430,7 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
 }
 
 static int ath9k_hw_process_ini(struct ath_hw *ah,
-				struct ath9k_channel *chan,
-				enum ath9k_ht_macmode macmode)
+				struct ath9k_channel *chan)
 {
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
 	int i, regWrites = 0;
@@ -1469,7 +1524,11 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
 		DO_DELAY(regWrites);
 	}
 
-	ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
+	ath9k_hw_write_regs(ah, freqIndex, regWrites);
+
+	if (AR_SREV_9271_10(ah))
+		REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
+				modesIndex, regWrites);
 
 	if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
 		REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
@@ -1477,7 +1536,7 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
 	}
 
 	ath9k_hw_override_ini(ah, chan);
-	ath9k_hw_set_regs(ah, chan, macmode);
+	ath9k_hw_set_regs(ah, chan);
 	ath9k_hw_init_chain_masks(ah);
 
 	if (OLC_FOR_AR9280_20_LATER)
@@ -1491,8 +1550,8 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
 				 (u32) regulatory->power_limit));
 
 	if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"ar5416SetRfRegs failed\n");
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "ar5416SetRfRegs failed\n");
 		return -EIO;
 	}
 
@@ -1697,16 +1756,14 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
 
 	REG_WRITE(ah, AR_RTC_RC, 0);
 	if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_RESET,
-			"RTC stuck in MAC reset\n");
+		ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
+			  "RTC stuck in MAC reset\n");
 		return false;
 	}
 
 	if (!AR_SREV_9100(ah))
 		REG_WRITE(ah, AR_RC, 0);
 
-	ath9k_hw_init_pll(ah, NULL);
-
 	if (AR_SREV_9100(ah))
 		udelay(50);
 
@@ -1734,7 +1791,8 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
 			   AR_RTC_STATUS_M,
 			   AR_RTC_STATUS_ON,
 			   AH_WAIT_TIMEOUT)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "RTC not waking up\n");
+		ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
+			  "RTC not waking up\n");
 		return false;
 	}
 
@@ -1759,8 +1817,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 	}
 }
 
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
-			      enum ath9k_ht_macmode macmode)
+static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
 {
 	u32 phymode;
 	u32 enableDacFifo = 0;
@@ -1779,12 +1836,10 @@ static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
 		    (chan->chanmode == CHANNEL_G_HT40PLUS))
 			phymode |= AR_PHY_FC_DYN2040_PRI_CH;
 
-		if (ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
-			phymode |= AR_PHY_FC_DYN2040_EXT_CH;
 	}
 	REG_WRITE(ah, AR_PHY_TURBO, phymode);
 
-	ath9k_hw_set11nmac2040(ah, macmode);
+	ath9k_hw_set11nmac2040(ah);
 
 	REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
 	REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
@@ -1810,17 +1865,19 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
 }
 
 static bool ath9k_hw_channel_change(struct ath_hw *ah,
-				    struct ath9k_channel *chan,
-				    enum ath9k_ht_macmode macmode)
+				    struct ath9k_channel *chan)
 {
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_channel *channel = chan->chan;
 	u32 synthDelay, qnum;
+	int r;
 
 	for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
 		if (ath9k_hw_numtxpending(ah, qnum)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
-				"Transmit frames pending on queue %d\n", qnum);
+			ath_print(common, ATH_DBG_QUEUE,
+				  "Transmit frames pending on "
+				  "queue %d\n", qnum);
 			return false;
 		}
 	}
@@ -1828,21 +1885,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
 	REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
 	if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
 			   AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Could not kill baseband RX\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Could not kill baseband RX\n");
 		return false;
 	}
 
-	ath9k_hw_set_regs(ah, chan, macmode);
+	ath9k_hw_set_regs(ah, chan);
 
-	if (AR_SREV_9280_10_OR_LATER(ah)) {
-		ath9k_hw_ar9280_set_channel(ah, chan);
-	} else {
-		if (!(ath9k_hw_set_channel(ah, chan))) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Failed to set channel\n");
-			return false;
-		}
+	r = ah->ath9k_hw_rf_set_freq(ah, chan);
+	if (r) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Failed to set channel\n");
+		return false;
 	}
 
 	ah->eep_ops->set_txpower(ah, chan,
@@ -1865,10 +1919,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
 	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
 		ath9k_hw_set_delta_slope(ah, chan);
 
-	if (AR_SREV_9280_10_OR_LATER(ah))
-		ath9k_hw_9280_spur_mitigate(ah, chan);
-	else
-		ath9k_hw_spur_mitigate(ah, chan);
+	ah->ath9k_hw_spur_mitigate_freq(ah, chan);
 
 	if (!chan->oneTimeCalsDone)
 		chan->oneTimeCalsDone = true;
@@ -1876,457 +1927,6 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
 	return true;
 }
 
-static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
-	int bb_spur = AR_NO_SPUR;
-	int freq;
-	int bin, cur_bin;
-	int bb_spur_off, spur_subchannel_sd;
-	int spur_freq_sd;
-	int spur_delta_phase;
-	int denominator;
-	int upper, lower, cur_vit_mask;
-	int tmp, newVal;
-	int i;
-	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
-			  AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
-	};
-	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
-			 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
-	};
-	int inc[4] = { 0, 100, 0, 0 };
-	struct chan_centers centers;
-
-	int8_t mask_m[123];
-	int8_t mask_p[123];
-	int8_t mask_amt;
-	int tmp_mask;
-	int cur_bb_spur;
-	bool is2GHz = IS_CHAN_2GHZ(chan);
-
-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
-	ath9k_hw_get_channel_centers(ah, chan, &centers);
-	freq = centers.synth_center;
-
-	ah->config.spurmode = SPUR_ENABLE_EEPROM;
-	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-
-		if (is2GHz)
-			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
-		else
-			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
-
-		if (AR_NO_SPUR == cur_bb_spur)
-			break;
-		cur_bb_spur = cur_bb_spur - freq;
-
-		if (IS_CHAN_HT40(chan)) {
-			if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
-			    (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
-				bb_spur = cur_bb_spur;
-				break;
-			}
-		} else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
-			   (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
-			bb_spur = cur_bb_spur;
-			break;
-		}
-	}
-
-	if (AR_NO_SPUR == bb_spur) {
-		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
-			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
-		return;
-	} else {
-		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
-			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
-	}
-
-	bin = bb_spur * 320;
-
-	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-
-	newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
-			AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
-			AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
-			AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
-
-	newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
-		  AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
-		  AR_PHY_SPUR_REG_MASK_RATE_SELECT |
-		  AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
-		  SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
-	REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
-
-	if (IS_CHAN_HT40(chan)) {
-		if (bb_spur < 0) {
-			spur_subchannel_sd = 1;
-			bb_spur_off = bb_spur + 10;
-		} else {
-			spur_subchannel_sd = 0;
-			bb_spur_off = bb_spur - 10;
-		}
-	} else {
-		spur_subchannel_sd = 0;
-		bb_spur_off = bb_spur;
-	}
-
-	if (IS_CHAN_HT40(chan))
-		spur_delta_phase =
-			((bb_spur * 262144) /
-			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-	else
-		spur_delta_phase =
-			((bb_spur * 524288) /
-			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
-	denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
-	spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
-
-	newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
-		  SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
-		  SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
-	REG_WRITE(ah, AR_PHY_TIMING11, newVal);
-
-	newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
-	REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
-
-	cur_bin = -6000;
-	upper = bin + 100;
-	lower = bin - 100;
-
-	for (i = 0; i < 4; i++) {
-		int pilot_mask = 0;
-		int chan_mask = 0;
-		int bp = 0;
-		for (bp = 0; bp < 30; bp++) {
-			if ((cur_bin > lower) && (cur_bin < upper)) {
-				pilot_mask = pilot_mask | 0x1 << bp;
-				chan_mask = chan_mask | 0x1 << bp;
-			}
-			cur_bin += 100;
-		}
-		cur_bin += inc[i];
-		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
-		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
-	}
-
-	cur_vit_mask = 6100;
-	upper = bin + 120;
-	lower = bin - 120;
-
-	for (i = 0; i < 123; i++) {
-		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
-			/* workaround for gcc bug #37014 */
-			volatile int tmp_v = abs(cur_vit_mask - bin);
-
-			if (tmp_v < 75)
-				mask_amt = 1;
-			else
-				mask_amt = 0;
-			if (cur_vit_mask < 0)
-				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
-			else
-				mask_p[cur_vit_mask / 100] = mask_amt;
-		}
-		cur_vit_mask -= 100;
-	}
-
-	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
-		| (mask_m[48] << 26) | (mask_m[49] << 24)
-		| (mask_m[50] << 22) | (mask_m[51] << 20)
-		| (mask_m[52] << 18) | (mask_m[53] << 16)
-		| (mask_m[54] << 14) | (mask_m[55] << 12)
-		| (mask_m[56] << 10) | (mask_m[57] << 8)
-		| (mask_m[58] << 6) | (mask_m[59] << 4)
-		| (mask_m[60] << 2) | (mask_m[61] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
-	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
-	tmp_mask = (mask_m[31] << 28)
-		| (mask_m[32] << 26) | (mask_m[33] << 24)
-		| (mask_m[34] << 22) | (mask_m[35] << 20)
-		| (mask_m[36] << 18) | (mask_m[37] << 16)
-		| (mask_m[48] << 14) | (mask_m[39] << 12)
-		| (mask_m[40] << 10) | (mask_m[41] << 8)
-		| (mask_m[42] << 6) | (mask_m[43] << 4)
-		| (mask_m[44] << 2) | (mask_m[45] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
-	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
-		| (mask_m[18] << 26) | (mask_m[18] << 24)
-		| (mask_m[20] << 22) | (mask_m[20] << 20)
-		| (mask_m[22] << 18) | (mask_m[22] << 16)
-		| (mask_m[24] << 14) | (mask_m[24] << 12)
-		| (mask_m[25] << 10) | (mask_m[26] << 8)
-		| (mask_m[27] << 6) | (mask_m[28] << 4)
-		| (mask_m[29] << 2) | (mask_m[30] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
-	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
-		| (mask_m[2] << 26) | (mask_m[3] << 24)
-		| (mask_m[4] << 22) | (mask_m[5] << 20)
-		| (mask_m[6] << 18) | (mask_m[7] << 16)
-		| (mask_m[8] << 14) | (mask_m[9] << 12)
-		| (mask_m[10] << 10) | (mask_m[11] << 8)
-		| (mask_m[12] << 6) | (mask_m[13] << 4)
-		| (mask_m[14] << 2) | (mask_m[15] << 0);
-	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
-	tmp_mask = (mask_p[15] << 28)
-		| (mask_p[14] << 26) | (mask_p[13] << 24)
-		| (mask_p[12] << 22) | (mask_p[11] << 20)
-		| (mask_p[10] << 18) | (mask_p[9] << 16)
-		| (mask_p[8] << 14) | (mask_p[7] << 12)
-		| (mask_p[6] << 10) | (mask_p[5] << 8)
-		| (mask_p[4] << 6) | (mask_p[3] << 4)
-		| (mask_p[2] << 2) | (mask_p[1] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
-	tmp_mask = (mask_p[30] << 28)
-		| (mask_p[29] << 26) | (mask_p[28] << 24)
-		| (mask_p[27] << 22) | (mask_p[26] << 20)
-		| (mask_p[25] << 18) | (mask_p[24] << 16)
-		| (mask_p[23] << 14) | (mask_p[22] << 12)
-		| (mask_p[21] << 10) | (mask_p[20] << 8)
-		| (mask_p[19] << 6) | (mask_p[18] << 4)
-		| (mask_p[17] << 2) | (mask_p[16] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
-	tmp_mask = (mask_p[45] << 28)
-		| (mask_p[44] << 26) | (mask_p[43] << 24)
-		| (mask_p[42] << 22) | (mask_p[41] << 20)
-		| (mask_p[40] << 18) | (mask_p[39] << 16)
-		| (mask_p[38] << 14) | (mask_p[37] << 12)
-		| (mask_p[36] << 10) | (mask_p[35] << 8)
-		| (mask_p[34] << 6) | (mask_p[33] << 4)
-		| (mask_p[32] << 2) | (mask_p[31] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
-	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
-		| (mask_p[59] << 26) | (mask_p[58] << 24)
-		| (mask_p[57] << 22) | (mask_p[56] << 20)
-		| (mask_p[55] << 18) | (mask_p[54] << 16)
-		| (mask_p[53] << 14) | (mask_p[52] << 12)
-		| (mask_p[51] << 10) | (mask_p[50] << 8)
-		| (mask_p[49] << 6) | (mask_p[48] << 4)
-		| (mask_p[47] << 2) | (mask_p[46] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
-	int bb_spur = AR_NO_SPUR;
-	int bin, cur_bin;
-	int spur_freq_sd;
-	int spur_delta_phase;
-	int denominator;
-	int upper, lower, cur_vit_mask;
-	int tmp, new;
-	int i;
-	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
-			  AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
-	};
-	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
-			 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
-	};
-	int inc[4] = { 0, 100, 0, 0 };
-
-	int8_t mask_m[123];
-	int8_t mask_p[123];
-	int8_t mask_amt;
-	int tmp_mask;
-	int cur_bb_spur;
-	bool is2GHz = IS_CHAN_2GHZ(chan);
-
-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
-	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-		if (AR_NO_SPUR == cur_bb_spur)
-			break;
-		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
-		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
-			bb_spur = cur_bb_spur;
-			break;
-		}
-	}
-
-	if (AR_NO_SPUR == bb_spur)
-		return;
-
-	bin = bb_spur * 32;
-
-	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
-		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
-		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
-		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
-	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
-	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
-	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
-	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
-	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
-	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
-	REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
-	spur_delta_phase = ((bb_spur * 524288) / 100) &
-		AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
-	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
-	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
-	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
-	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
-	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
-	REG_WRITE(ah, AR_PHY_TIMING11, new);
-
-	cur_bin = -6000;
-	upper = bin + 100;
-	lower = bin - 100;
-
-	for (i = 0; i < 4; i++) {
-		int pilot_mask = 0;
-		int chan_mask = 0;
-		int bp = 0;
-		for (bp = 0; bp < 30; bp++) {
-			if ((cur_bin > lower) && (cur_bin < upper)) {
-				pilot_mask = pilot_mask | 0x1 << bp;
-				chan_mask = chan_mask | 0x1 << bp;
-			}
-			cur_bin += 100;
-		}
-		cur_bin += inc[i];
-		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
-		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
-	}
-
-	cur_vit_mask = 6100;
-	upper = bin + 120;
-	lower = bin - 120;
-
-	for (i = 0; i < 123; i++) {
-		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
-			/* workaround for gcc bug #37014 */
-			volatile int tmp_v = abs(cur_vit_mask - bin);
-
-			if (tmp_v < 75)
-				mask_amt = 1;
-			else
-				mask_amt = 0;
-			if (cur_vit_mask < 0)
-				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
-			else
-				mask_p[cur_vit_mask / 100] = mask_amt;
-		}
-		cur_vit_mask -= 100;
-	}
-
-	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
-		| (mask_m[48] << 26) | (mask_m[49] << 24)
-		| (mask_m[50] << 22) | (mask_m[51] << 20)
-		| (mask_m[52] << 18) | (mask_m[53] << 16)
-		| (mask_m[54] << 14) | (mask_m[55] << 12)
-		| (mask_m[56] << 10) | (mask_m[57] << 8)
-		| (mask_m[58] << 6) | (mask_m[59] << 4)
-		| (mask_m[60] << 2) | (mask_m[61] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
-	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
-	tmp_mask = (mask_m[31] << 28)
-		| (mask_m[32] << 26) | (mask_m[33] << 24)
-		| (mask_m[34] << 22) | (mask_m[35] << 20)
-		| (mask_m[36] << 18) | (mask_m[37] << 16)
-		| (mask_m[48] << 14) | (mask_m[39] << 12)
-		| (mask_m[40] << 10) | (mask_m[41] << 8)
-		| (mask_m[42] << 6) | (mask_m[43] << 4)
-		| (mask_m[44] << 2) | (mask_m[45] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
-	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
-		| (mask_m[18] << 26) | (mask_m[18] << 24)
-		| (mask_m[20] << 22) | (mask_m[20] << 20)
-		| (mask_m[22] << 18) | (mask_m[22] << 16)
-		| (mask_m[24] << 14) | (mask_m[24] << 12)
-		| (mask_m[25] << 10) | (mask_m[26] << 8)
-		| (mask_m[27] << 6) | (mask_m[28] << 4)
-		| (mask_m[29] << 2) | (mask_m[30] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
-	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
-		| (mask_m[2] << 26) | (mask_m[3] << 24)
-		| (mask_m[4] << 22) | (mask_m[5] << 20)
-		| (mask_m[6] << 18) | (mask_m[7] << 16)
-		| (mask_m[8] << 14) | (mask_m[9] << 12)
-		| (mask_m[10] << 10) | (mask_m[11] << 8)
-		| (mask_m[12] << 6) | (mask_m[13] << 4)
-		| (mask_m[14] << 2) | (mask_m[15] << 0);
-	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
-	tmp_mask = (mask_p[15] << 28)
-		| (mask_p[14] << 26) | (mask_p[13] << 24)
-		| (mask_p[12] << 22) | (mask_p[11] << 20)
-		| (mask_p[10] << 18) | (mask_p[9] << 16)
-		| (mask_p[8] << 14) | (mask_p[7] << 12)
-		| (mask_p[6] << 10) | (mask_p[5] << 8)
-		| (mask_p[4] << 6) | (mask_p[3] << 4)
-		| (mask_p[2] << 2) | (mask_p[1] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
-	tmp_mask = (mask_p[30] << 28)
-		| (mask_p[29] << 26) | (mask_p[28] << 24)
-		| (mask_p[27] << 22) | (mask_p[26] << 20)
-		| (mask_p[25] << 18) | (mask_p[24] << 16)
-		| (mask_p[23] << 14) | (mask_p[22] << 12)
-		| (mask_p[21] << 10) | (mask_p[20] << 8)
-		| (mask_p[19] << 6) | (mask_p[18] << 4)
-		| (mask_p[17] << 2) | (mask_p[16] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
-	tmp_mask = (mask_p[45] << 28)
-		| (mask_p[44] << 26) | (mask_p[43] << 24)
-		| (mask_p[42] << 22) | (mask_p[41] << 20)
-		| (mask_p[40] << 18) | (mask_p[39] << 16)
-		| (mask_p[38] << 14) | (mask_p[37] << 12)
-		| (mask_p[36] << 10) | (mask_p[35] << 8)
-		| (mask_p[34] << 6) | (mask_p[33] << 4)
-		| (mask_p[32] << 2) | (mask_p[31] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
-	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
-		| (mask_p[59] << 26) | (mask_p[58] << 24)
-		| (mask_p[57] << 22) | (mask_p[56] << 20)
-		| (mask_p[55] << 18) | (mask_p[54] << 16)
-		| (mask_p[53] << 14) | (mask_p[52] << 12)
-		| (mask_p[51] << 10) | (mask_p[50] << 8)
-		| (mask_p[49] << 6) | (mask_p[48] << 4)
-		| (mask_p[47] << 2) | (mask_p[46] << 0);
-	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
-	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
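The two spur-mitigation routines deleted here (they leave hw.c as part of this restructuring) build their pilot and channel masks by stepping through 100-unit bins and setting a bit for every bin that falls within ±100 of the spur bin. A compact standalone sketch of just that mask loop, with an invented spur bin:

/* Sketch of the pilot/channel mask loop from the removed spur-mitigation
 * helpers. The spur bin below is an invented sample value. */
#include <stdio.h>

int main(void)
{
	int bin = -1600;		/* hypothetical spur bin (bb_spur * 32) */
	int inc[4] = { 0, 100, 0, 0 };
	int upper = bin + 100;
	int lower = bin - 100;
	int cur_bin = -6000;
	int i, bp;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0;

		for (bp = 0; bp < 30; bp++) {
			if (cur_bin > lower && cur_bin < upper)
				pilot_mask |= 1 << bp;	/* bin falls on the spur */
			cur_bin += 100;
		}
		cur_bin += inc[i];
		/* the driver writes this value to both the pilot and
		 * channel mask registers for group i */
		printf("mask[%d] = 0x%08x\n", i, pilot_mask);
	}
	return 0;
}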
 static void ath9k_enable_rfkill(struct ath_hw *ah)
 {
 	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
@@ -2342,17 +1942,16 @@ static void ath9k_enable_rfkill(struct ath_hw *ah)
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		    bool bChannelChange)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 saveLedState;
-	struct ath_softc *sc = ah->ah_sc;
 	struct ath9k_channel *curchan = ah->curchan;
 	u32 saveDefAntenna;
 	u32 macStaId1;
 	u64 tsf = 0;
 	int i, rx_chainmask, r;
 
-	ah->extprotspacing = sc->ht_extprotspacing;
-	ah->txchainmask = sc->tx_chainmask;
-	ah->rxchainmask = sc->rx_chainmask;
+	ah->txchainmask = common->tx_chainmask;
+	ah->rxchainmask = common->rx_chainmask;
 
 	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
 		return -EIO;
@@ -2369,7 +1968,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	     !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
 	     IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
 
-		if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) {
+		if (ath9k_hw_channel_change(ah, chan)) {
 			ath9k_hw_loadnf(ah, ah->curchan);
 			ath9k_hw_start_nfcal(ah);
 			return 0;
@@ -2400,7 +1999,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	}
 
 	if (!ath9k_hw_chip_reset(ah, chan)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Chip reset failed\n");
+		ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n");
 		return -EINVAL;
 	}
 
@@ -2429,7 +2028,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
 				AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
 	}
-	r = ath9k_hw_process_ini(ah, chan, sc->tx_chan_width);
+	r = ath9k_hw_process_ini(ah, chan);
 	if (r)
 		return r;
 
@@ -2453,17 +2052,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
 		ath9k_hw_set_delta_slope(ah, chan);
 
-	if (AR_SREV_9280_10_OR_LATER(ah))
-		ath9k_hw_9280_spur_mitigate(ah, chan);
-	else
-		ath9k_hw_spur_mitigate(ah, chan);
-
+	ah->ath9k_hw_spur_mitigate_freq(ah, chan);
 	ah->eep_ops->set_board_values(ah, chan);
 
-	ath9k_hw_decrease_chain_power(ah, chan);
-
-	REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ah->macaddr));
-	REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ah->macaddr + 4)
+	REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
+	REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
 		  | macStaId1
 		  | AR_STA_ID1_RTS_USE_DEF
 		  | (ah->config.
@@ -2471,24 +2064,19 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		  | ah->sta_id1_defaults);
 	ath9k_hw_set_operating_mode(ah, ah->opmode);
 
-	REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask));
-	REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
+	ath_hw_setbssidmask(common);
 
 	REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
 
-	REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid));
-	REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
-		  ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
+	ath9k_hw_write_associd(ah);
 
 	REG_WRITE(ah, AR_ISR, ~0);
 
 	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
 
-	if (AR_SREV_9280_10_OR_LATER(ah))
-		ath9k_hw_ar9280_set_channel(ah, chan);
-	else
-		if (!(ath9k_hw_set_channel(ah, chan)))
-			return -EIO;
+	r = ah->ath9k_hw_rf_set_freq(ah, chan);
+	if (r)
+		return r;
 
 	for (i = 0; i < AR_NUM_DCU; i++)
 		REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
@@ -2558,13 +2146,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		u32 mask;
 		mask = REG_READ(ah, AR_CFG);
 		if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+			ath_print(common, ATH_DBG_RESET,
 				"CFG Byte Swap Set 0x%x\n", mask);
 		} else {
 			mask =
 				INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
 			REG_WRITE(ah, AR_CFG, mask);
-			DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+			ath_print(common, ATH_DBG_RESET,
 				"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
 		}
 	} else {
@@ -2577,11 +2165,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 #endif
 	}
 
-	if (ah->ah_sc->sc_flags & SC_OP_BTCOEX_ENABLED)
+	if (ah->btcoex_hw.enabled)
 		ath9k_hw_btcoex_enable(ah);
 
 	return 0;
 }
+EXPORT_SYMBOL(ath9k_hw_reset);
 
 /************************/
 /* Key Cache Management */
@@ -2592,8 +2181,8 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
 	u32 keyType;
 
 	if (entry >= ah->caps.keycache_size) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"keychache entry %u out of range\n", entry);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "keychache entry %u out of range\n", entry);
 		return false;
 	}
 
@@ -2620,14 +2209,15 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_keyreset);
 
 bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
 {
 	u32 macHi, macLo;
 
 	if (entry >= ah->caps.keycache_size) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"keychache entry %u out of range\n", entry);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "keychache entry %u out of range\n", entry);
 		return false;
 	}
 
@@ -2648,18 +2238,20 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_keysetmac);
 
 bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 				 const struct ath9k_keyval *k,
 				 const u8 *mac)
 {
 	const struct ath9k_hw_capabilities *pCap = &ah->caps;
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 key0, key1, key2, key3, key4;
 	u32 keyType;
 
 	if (entry >= pCap->keycache_size) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"keycache entry %u out of range\n", entry);
+		ath_print(common, ATH_DBG_FATAL,
+			  "keycache entry %u out of range\n", entry);
 		return false;
 	}
 
@@ -2669,9 +2261,9 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 		break;
 	case ATH9K_CIPHER_AES_CCM:
 		if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANY,
-				"AES-CCM not supported by mac rev 0x%x\n",
-				ah->hw_version.macRev);
+			ath_print(common, ATH_DBG_ANY,
+				  "AES-CCM not supported by mac rev 0x%x\n",
+				  ah->hw_version.macRev);
 			return false;
 		}
 		keyType = AR_KEYTABLE_TYPE_CCM;
@@ -2680,15 +2272,15 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 		keyType = AR_KEYTABLE_TYPE_TKIP;
 		if (ATH9K_IS_MIC_ENABLED(ah)
 		    && entry + 64 >= pCap->keycache_size) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANY,
-				"entry %u inappropriate for TKIP\n", entry);
+			ath_print(common, ATH_DBG_ANY,
+				  "entry %u inappropriate for TKIP\n", entry);
 			return false;
 		}
 		break;
 	case ATH9K_CIPHER_WEP:
 		if (k->kv_len < WLAN_KEY_LEN_WEP40) {
-			DPRINTF(ah->ah_sc, ATH_DBG_ANY,
-				"WEP key length %u too small\n", k->kv_len);
+			ath_print(common, ATH_DBG_ANY,
+				  "WEP key length %u too small\n", k->kv_len);
 			return false;
 		}
 		if (k->kv_len <= WLAN_KEY_LEN_WEP40)
@@ -2702,8 +2294,8 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 		keyType = AR_KEYTABLE_TYPE_CLR;
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"cipher %u not supported\n", k->kv_type);
+		ath_print(common, ATH_DBG_FATAL,
+			  "cipher %u not supported\n", k->kv_type);
 		return false;
 	}
 
@@ -2845,6 +2437,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
 
 bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
 {
@@ -2855,6 +2448,7 @@ bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
 	}
 	return false;
 }
+EXPORT_SYMBOL(ath9k_hw_keyisvalid);
 
 /******************************/
 /* Power Management (Chipset) */
@@ -2869,8 +2463,9 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
 		if (!AR_SREV_9100(ah))
 			REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
 
-		REG_CLR_BIT(ah, (AR_RTC_RESET),
-			    AR_RTC_RESET_EN);
+		if (!AR_SREV_5416(ah))
+			REG_CLR_BIT(ah, (AR_RTC_RESET),
+				    AR_RTC_RESET_EN);
 	}
 }
 
@@ -2902,6 +2497,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
 					   ATH9K_RESET_POWER_ON) != true) {
 				return false;
 			}
+			ath9k_hw_init_pll(ah, NULL);
 		}
 		if (AR_SREV_9100(ah))
 			REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2920,8 +2516,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
 				    AR_RTC_FORCE_WAKE_EN);
 		}
 		if (i == 0) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Failed to wakeup in %uus\n", POWER_UP_TIME / 20);
+			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+				  "Failed to wakeup in %uus\n",
+				  POWER_UP_TIME / 20);
 			return false;
 		}
 	}
@@ -2931,9 +2528,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
 	return true;
 }
 
-static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
-				     enum ath9k_power_mode mode)
+bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int status = true, setChip = true;
 	static const char *modes[] = {
 		"AWAKE",
@@ -2945,8 +2542,8 @@ static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
 	if (ah->power_mode == mode)
 		return status;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s -> %s\n",
-		modes[ah->power_mode], modes[mode]);
+	ath_print(common, ATH_DBG_RESET, "%s -> %s\n",
+		  modes[ah->power_mode], modes[mode]);
 
 	switch (mode) {
 	case ATH9K_PM_AWAKE:
@@ -2960,59 +2557,15 @@ static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
 		ath9k_set_power_network_sleep(ah, setChip);
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Unknown power mode %u\n", mode);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unknown power mode %u\n", mode);
 		return false;
 	}
 	ah->power_mode = mode;
 
 	return status;
 }
-
-bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
-{
-	unsigned long flags;
-	bool ret;
-
-	spin_lock_irqsave(&ah->ah_sc->sc_pm_lock, flags);
-	ret = ath9k_hw_setpower_nolock(ah, mode);
-	spin_unlock_irqrestore(&ah->ah_sc->sc_pm_lock, flags);
-
-	return ret;
-}
-
-void ath9k_ps_wakeup(struct ath_softc *sc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&sc->sc_pm_lock, flags);
-	if (++sc->ps_usecount != 1)
-		goto unlock;
-
-	ath9k_hw_setpower_nolock(sc->sc_ah, ATH9K_PM_AWAKE);
-
- unlock:
-	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
-}
-
-void ath9k_ps_restore(struct ath_softc *sc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&sc->sc_pm_lock, flags);
-	if (--sc->ps_usecount != 0)
-		goto unlock;
-
-	if (sc->ps_enabled &&
-	    !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
-			      SC_OP_WAIT_FOR_CAB |
-			      SC_OP_WAIT_FOR_PSPOLL_DATA |
-			      SC_OP_WAIT_FOR_TX_ACK)))
-		ath9k_hw_setpower_nolock(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
-
- unlock:
-	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
-}
+EXPORT_SYMBOL(ath9k_hw_setpower);
 
 /*
  * Helper for ASPM support.
@@ -3145,6 +2698,7 @@ void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
 		}
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
 
 /**********************/
 /* Interrupt Handling */
@@ -3168,6 +2722,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
 
 	return false;
 }
+EXPORT_SYMBOL(ath9k_hw_intrpend);
 
 bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
 {
@@ -3176,6 +2731,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	u32 sync_cause = 0;
 	bool fatal_int = false;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!AR_SREV_9100(ah)) {
 		if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
@@ -3249,8 +2805,8 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
 		}
 
 		if (isr & AR_ISR_RXORN) {
-			DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
-				"receive FIFO overrun interrupt\n");
+			ath_print(common, ATH_DBG_INTERRUPT,
+				  "receive FIFO overrun interrupt\n");
 		}
 
 		if (!AR_SREV_9100(ah)) {
@@ -3292,25 +2848,25 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
 
 		if (fatal_int) {
 			if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
-				DPRINTF(ah->ah_sc, ATH_DBG_ANY,
-					"received PCI FATAL interrupt\n");
+				ath_print(common, ATH_DBG_ANY,
+					  "received PCI FATAL interrupt\n");
 			}
 			if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
-				DPRINTF(ah->ah_sc, ATH_DBG_ANY,
-					"received PCI PERR interrupt\n");
+				ath_print(common, ATH_DBG_ANY,
+					  "received PCI PERR interrupt\n");
 			}
 			*masked |= ATH9K_INT_FATAL;
 		}
 		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
-			DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
-				"AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+			ath_print(common, ATH_DBG_INTERRUPT,
+				  "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
 			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
 			REG_WRITE(ah, AR_RC, 0);
 			*masked |= ATH9K_INT_FATAL;
 		}
 		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
-			DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
-				"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+			ath_print(common, ATH_DBG_INTERRUPT,
+				  "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
 		}
 
 		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -3319,17 +2875,19 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_getisr);
 
 enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 {
 	u32 omask = ah->mask_reg;
 	u32 mask, mask2;
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
+	struct ath_common *common = ath9k_hw_common(ah);
 
-	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
+	ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
 	if (omask & ATH9K_INT_GLOBAL) {
-		DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "disable IER\n");
+		ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
 		REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
 		(void) REG_READ(ah, AR_IER);
 		if (!AR_SREV_9100(ah)) {
@@ -3386,7 +2944,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 			mask2 |= AR_IMR_S2_CST;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+	ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
 	REG_WRITE(ah, AR_IMR, mask);
 	mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
 					   AR_IMR_S2_DTIM |
@@ -3406,7 +2964,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 	}
 
 	if (ints & ATH9K_INT_GLOBAL) {
-		DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "enable IER\n");
+		ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
 		REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
 		if (!AR_SREV_9100(ah)) {
 			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
@@ -3419,12 +2977,13 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 			REG_WRITE(ah, AR_INTR_SYNC_MASK,
 				  AR_INTR_SYNC_DEFAULT);
 		}
-		DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
-			 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+		ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+			  REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
 	}
 
 	return omask;
 }
+EXPORT_SYMBOL(ath9k_hw_set_interrupts);
 
 /*******************/
 /* Beacon Handling */
@@ -3467,9 +3026,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 			AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_BEACON,
-			"%s: unsupported opmode: %d\n",
-			__func__, ah->opmode);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
+			  "%s: unsupported opmode: %d\n",
+			  __func__, ah->opmode);
 		return;
 		break;
 	}
@@ -3481,18 +3040,19 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 
 	beacon_period &= ~ATH9K_BEACON_ENA;
 	if (beacon_period & ATH9K_BEACON_RESET_TSF) {
-		beacon_period &= ~ATH9K_BEACON_RESET_TSF;
 		ath9k_hw_reset_tsf(ah);
 	}
 
 	REG_SET_BIT(ah, AR_TIMER_MODE, flags);
 }
+EXPORT_SYMBOL(ath9k_hw_beaconinit);
 
 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 				    const struct ath9k_beacon_state *bs)
 {
 	u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
 
@@ -3518,10 +3078,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 	else
 		nextTbtt = bs->bs_nexttbtt;
 
-	DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
-	DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
-	DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
-	DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+	ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+	ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
+	ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
+	ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
 
 	REG_WRITE(ah, AR_NEXT_DTIM,
 		  TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
@@ -3549,16 +3109,18 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 	/* TSF Out of Range Threshold */
 	REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
 }
+EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
 
 /*******************/
 /* HW Capabilities */
 /*******************/
 
-void ath9k_hw_fill_cap_info(struct ath_hw *ah)
+int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
-	struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
 	u16 capField = 0, eeval;
 
@@ -3579,11 +3141,17 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
 			regulatory->current_rd += 5;
 		else if (regulatory->current_rd == 0x41)
 			regulatory->current_rd = 0x43;
-		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
-			"regdomain mapped to 0x%x\n", regulatory->current_rd);
+		ath_print(common, ATH_DBG_REGULATORY,
+			  "regdomain mapped to 0x%x\n", regulatory->current_rd);
 	}
 
 	eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
+	if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "no band has been marked as supported in EEPROM.\n");
+		return -EINVAL;
+	}
+
 	bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
 
 	if (eeval & AR5416_OPFLAGS_11A) {
@@ -3670,7 +3238,11 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
 		pCap->keycache_size = AR_KEYTABLE_SIZE;
 
 	pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
-	pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
+
+	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+		pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
+	else
+		pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
 
 	if (AR_SREV_9285_10_OR_LATER(ah))
 		pCap->num_gpio_pins = AR9285_NUM_GPIO;
@@ -3719,7 +3291,10 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
 			AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
 	}
 
-	pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
+	/* Advertise midband for AR5416 with FCC midband set in eeprom */
+	if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
+	    AR_SREV_5416(ah))
+		pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
 
 	pCap->num_antcfg_5ghz =
 		ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
@@ -3727,19 +3302,21 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
 		ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
 
 	if (AR_SREV_9280_10_OR_LATER(ah) &&
-	    ath_btcoex_supported(ah->hw_version.subsysid)) {
-		btcoex_info->btactive_gpio = ATH_BTACTIVE_GPIO;
-		btcoex_info->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
+	    ath9k_hw_btcoex_supported(ah)) {
+		btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
+		btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
 
 		if (AR_SREV_9285(ah)) {
-			btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_3WIRE;
-			btcoex_info->btpriority_gpio = ATH_BTPRIORITY_GPIO;
+			btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
+			btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO;
 		} else {
-			btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_2WIRE;
+			btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
 		}
 	} else {
-		btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_NONE;
+		btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
 	}
+
+	return 0;
 }
 
 bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
@@ -3812,6 +3389,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
 		return false;
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_getcapability);
 
 bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
 			    u32 capability, u32 setting, int *status)
@@ -3845,6 +3423,7 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
 		return false;
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_setcapability);
 
 /****************************/
 /* GPIO / RFKILL / Antennae */
@@ -3882,7 +3461,7 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
 {
 	u32 gpio_shift;
 
-	ASSERT(gpio < ah->caps.num_gpio_pins);
+	BUG_ON(gpio >= ah->caps.num_gpio_pins);
 
 	gpio_shift = gpio << 1;
 
@@ -3891,6 +3470,7 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
 		(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
 		(AR_GPIO_OE_OUT_DRV << gpio_shift));
 }
+EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
 
 u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
 {
@@ -3909,6 +3489,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
 	else
 		return MS_REG_READ(AR, gpio) != 0;
 }
+EXPORT_SYMBOL(ath9k_hw_gpio_get);
 
 void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
 			 u32 ah_signal_type)
@@ -3924,67 +3505,26 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
 		(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
 		(AR_GPIO_OE_OUT_DRV << gpio_shift));
 }
+EXPORT_SYMBOL(ath9k_hw_cfg_output);
 
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
 {
 	REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
 		AR_GPIO_BIT(gpio));
 }
+EXPORT_SYMBOL(ath9k_hw_set_gpio);
 
 u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
 {
 	return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
 }
+EXPORT_SYMBOL(ath9k_hw_getdefantenna);
 
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
 {
 	REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
 }
-
-bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
-			       enum ath9k_ant_setting settings,
-			       struct ath9k_channel *chan,
-			       u8 *tx_chainmask,
-			       u8 *rx_chainmask,
-			       u8 *antenna_cfgd)
-{
-	static u8 tx_chainmask_cfg, rx_chainmask_cfg;
-
-	if (AR_SREV_9280(ah)) {
-		if (!tx_chainmask_cfg) {
-
-			tx_chainmask_cfg = *tx_chainmask;
-			rx_chainmask_cfg = *rx_chainmask;
-		}
-
-		switch (settings) {
-		case ATH9K_ANT_FIXED_A:
-			*tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
-			*rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
-			*antenna_cfgd = true;
-			break;
-		case ATH9K_ANT_FIXED_B:
-			if (ah->caps.tx_chainmask >
-			    ATH9K_ANTENNA1_CHAINMASK) {
-				*tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
-			}
-			*rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
-			*antenna_cfgd = true;
-			break;
-		case ATH9K_ANT_VARIABLE:
-			*tx_chainmask = tx_chainmask_cfg;
-			*rx_chainmask = rx_chainmask_cfg;
-			*antenna_cfgd = true;
-			break;
-		default:
-			break;
-		}
-	} else {
-		ah->config.diversity_control = settings;
-	}
-
-	return true;
-}
+EXPORT_SYMBOL(ath9k_hw_setantenna);
 
 /*********************/
 /* General Operation */
@@ -4002,6 +3542,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
 
 	return bits;
 }
+EXPORT_SYMBOL(ath9k_hw_getrxfilter);
 
 void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
 {
@@ -4023,19 +3564,30 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
 		REG_WRITE(ah, AR_RXCFG,
 			  REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
 }
+EXPORT_SYMBOL(ath9k_hw_setrxfilter);
 
 bool ath9k_hw_phy_disable(struct ath_hw *ah)
 {
-	return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
+	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
+		return false;
+
+	ath9k_hw_init_pll(ah, NULL);
+	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_phy_disable);
 
 bool ath9k_hw_disable(struct ath_hw *ah)
 {
 	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
 		return false;
 
-	return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
+	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
+		return false;
+
+	ath9k_hw_init_pll(ah, NULL);
+	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_disable);
 
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
 {
@@ -4052,35 +3604,36 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
 				 min((u32) MAX_RATE_POWER,
 				 (u32) regulatory->power_limit));
 }
+EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
 
 void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
 {
-	memcpy(ah->macaddr, mac, ETH_ALEN);
+	memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
 }
+EXPORT_SYMBOL(ath9k_hw_setmac);
 
 void ath9k_hw_setopmode(struct ath_hw *ah)
 {
 	ath9k_hw_set_operating_mode(ah, ah->opmode);
 }
+EXPORT_SYMBOL(ath9k_hw_setopmode);
 
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
 {
 	REG_WRITE(ah, AR_MCAST_FIL0, filter0);
 	REG_WRITE(ah, AR_MCAST_FIL1, filter1);
 }
+EXPORT_SYMBOL(ath9k_hw_setmcastfilter);
 
-void ath9k_hw_setbssidmask(struct ath_softc *sc)
+void ath9k_hw_write_associd(struct ath_hw *ah)
 {
-	REG_WRITE(sc->sc_ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask));
-	REG_WRITE(sc->sc_ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
-}
+	struct ath_common *common = ath9k_hw_common(ah);
 
-void ath9k_hw_write_associd(struct ath_softc *sc)
-{
-	REG_WRITE(sc->sc_ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid));
-	REG_WRITE(sc->sc_ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
-		  ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
+	REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
+	REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
+		  ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
 }
+EXPORT_SYMBOL(ath9k_hw_write_associd);
 
 u64 ath9k_hw_gettsf64(struct ath_hw *ah)
 {
@@ -4091,24 +3644,25 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah)
 
 	return tsf;
 }
+EXPORT_SYMBOL(ath9k_hw_gettsf64);
 
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
 {
 	REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
 	REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
 }
+EXPORT_SYMBOL(ath9k_hw_settsf64);
 
 void ath9k_hw_reset_tsf(struct ath_hw *ah)
 {
-	ath9k_ps_wakeup(ah->ah_sc);
 	if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
 			   AH_TSF_WRITE_TIMEOUT))
-		DPRINTF(ah->ah_sc, ATH_DBG_RESET,
-			"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
+		ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
+			  "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
 
 	REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
-	ath9k_ps_restore(ah->ah_sc);
 }
+EXPORT_SYMBOL(ath9k_hw_reset_tsf);
 
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
 {
@@ -4117,11 +3671,28 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
 	else
 		ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
 }
+EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
+
+/*
+ *  Extend 15-bit time stamp from rx descriptor to
+ *  a full 64-bit TSF using the current h/w TSF.
+ */
+u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
+{
+	u64 tsf;
+
+	tsf = ath9k_hw_gettsf64(ah);
+	if ((tsf & 0x7fff) < rstamp)
+		tsf -= 0x8000;
+	return (tsf & ~0x7fff) | rstamp;
+}
+EXPORT_SYMBOL(ath9k_hw_extend_tsf);
 
 bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
 {
 	if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad slot time %u\n", us);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
+			  "bad slot time %u\n", us);
 		ah->slottime = (u32) -1;
 		return false;
 	} else {
@@ -4130,13 +3701,14 @@ bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
 		return true;
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_setslottime);
 
-void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah)
 {
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	u32 macmode;
 
-	if (mode == ATH9K_HT_MACMODE_2040 &&
-	    !ah->config.cwm_ignore_extcca)
+	if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
 		macmode = AR_2040_JOINED_RX_CLEAR;
 	else
 		macmode = 0;
@@ -4193,6 +3765,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah)
 {
 	return REG_READ(ah, AR_TSF_L32);
 }
+EXPORT_SYMBOL(ath9k_hw_gettsf32);
 
 struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 					  void (*trigger)(void *),
@@ -4206,8 +3779,9 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 	timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
 
 	if (timer == NULL) {
-		printk(KERN_DEBUG "Failed to allocate memory"
-		       "for hw timer[%d]\n", timer_index);
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "Failed to allocate memory for hw timer[%d]\n",
+			  timer_index);
 		return NULL;
 	}
 
@@ -4220,10 +3794,12 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 
 	return timer;
 }
+EXPORT_SYMBOL(ath_gen_timer_alloc);
 
-void ath_gen_timer_start(struct ath_hw *ah,
-			 struct ath_gen_timer *timer,
-			 u32 timer_next, u32 timer_period)
+void ath9k_hw_gen_timer_start(struct ath_hw *ah,
+			      struct ath_gen_timer *timer,
+			      u32 timer_next,
+			      u32 timer_period)
 {
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 	u32 tsf;
@@ -4234,8 +3810,9 @@ void ath_gen_timer_start(struct ath_hw *ah,
 
 	tsf = ath9k_hw_gettsf32(ah);
 
-	DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, "curent tsf %x period %x"
-		"timer_next %x\n", tsf, timer_period, timer_next);
+	ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
+		  "current tsf %x period %x timer_next %x\n",
+		  tsf, timer_period, timer_next);
 
 	/*
 	 * Pull timer_next forward if the current TSF already passed it
@@ -4258,15 +3835,10 @@ void ath_gen_timer_start(struct ath_hw *ah,
 	REG_SET_BIT(ah, AR_IMR_S5,
 		(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
 		SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
-
-	if ((ah->ah_sc->imask & ATH9K_INT_GENTIMER) == 0) {
-		ath9k_hw_set_interrupts(ah, 0);
-		ah->ah_sc->imask |= ATH9K_INT_GENTIMER;
-		ath9k_hw_set_interrupts(ah, ah->ah_sc->imask);
-	}
 }
+EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
 
-void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 {
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 
@@ -4285,14 +3857,8 @@ void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 		SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
 
 	clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
-
-	/* if no timer is enabled, turn off interrupt mask */
-	if (timer_table->timer_mask.val == 0) {
-		ath9k_hw_set_interrupts(ah, 0);
-		ah->ah_sc->imask &= ~ATH9K_INT_GENTIMER;
-		ath9k_hw_set_interrupts(ah, ah->ah_sc->imask);
-	}
 }
+EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
 
 void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
 {
@@ -4302,6 +3868,7 @@ void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
 	timer_table->timers[timer->index] = NULL;
 	kfree(timer);
 }
+EXPORT_SYMBOL(ath_gen_timer_free);
 
 /*
  * Generic Timer Interrupts handling
@@ -4310,6 +3877,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
 {
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 	struct ath_gen_timer *timer;
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 trigger_mask, thresh_mask, index;
 
 	/* get hardware generic timer interrupt status */
@@ -4324,8 +3892,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
 		index = rightmost_index(timer_table, &thresh_mask);
 		timer = timer_table->timers[index];
 		BUG_ON(!timer);
-		DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER,
-			"TSF overflow for Gen timer %d\n", index);
+		ath_print(common, ATH_DBG_HWTIMER,
+			  "TSF overflow for Gen timer %d\n", index);
 		timer->overflow(timer->arg);
 	}
 
@@ -4333,21 +3901,95 @@ void ath_gen_timer_isr(struct ath_hw *ah)
 		index = rightmost_index(timer_table, &trigger_mask);
 		timer = timer_table->timers[index];
 		BUG_ON(!timer);
-		DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER,
-			"Gen timer[%d] trigger\n", index);
+		ath_print(common, ATH_DBG_HWTIMER,
+			  "Gen timer[%d] trigger\n", index);
 		timer->trigger(timer->arg);
 	}
 }
+EXPORT_SYMBOL(ath_gen_timer_isr);
+
+static struct {
+	u32 version;
+	const char * name;
+} ath_mac_bb_names[] = {
+	/* Devices with external radios */
+	{ AR_SREV_VERSION_5416_PCI,	"5416" },
+	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
+	{ AR_SREV_VERSION_9100,		"9100" },
+	{ AR_SREV_VERSION_9160,		"9160" },
+	/* Single-chip solutions */
+	{ AR_SREV_VERSION_9280,		"9280" },
+	{ AR_SREV_VERSION_9285,		"9285" },
+	{ AR_SREV_VERSION_9287,         "9287" },
+	{ AR_SREV_VERSION_9271,         "9271" },
+};
+
+/* For devices with external radios */
+static struct {
+	u16 version;
+	const char * name;
+} ath_rf_names[] = {
+	{ 0,				"5133" },
+	{ AR_RAD5133_SREV_MAJOR,	"5133" },
+	{ AR_RAD5122_SREV_MAJOR,	"5122" },
+	{ AR_RAD2133_SREV_MAJOR,	"2133" },
+	{ AR_RAD2122_SREV_MAJOR,	"2122" }
+};
+
+/*
+ * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
+ */
+static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
+{
+	int i;
+
+	for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
+		if (ath_mac_bb_names[i].version == mac_bb_version) {
+			return ath_mac_bb_names[i].name;
+		}
+	}
+
+	return "????";
+}
 
 /*
- * Primitive to disable ASPM
+ * Return the RF name. "????" is returned if the RF is unknown.
+ * Used for devices with external radios.
  */
-void ath_pcie_aspm_disable(struct ath_softc *sc)
+static const char *ath9k_hw_rf_name(u16 rf_version)
+{
+	int i;
+
+	for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
+		if (ath_rf_names[i].version == rf_version) {
+			return ath_rf_names[i].name;
+		}
+	}
+
+	return "????";
+}
+
+void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
 {
-	struct pci_dev *pdev = to_pci_dev(sc->dev);
-	u8 aspm;
+	int used;
+
+	/* chipsets >= AR9280 are single-chip */
+	if (AR_SREV_9280_10_OR_LATER(ah)) {
+		used = snprintf(hw_name, len,
+			       "Atheros AR%s Rev:%x",
+			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+			       ah->hw_version.macRev);
+	}
+	else {
+		used = snprintf(hw_name, len,
+			       "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+			       ah->hw_version.macRev,
+			       ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
+						AR_RADIO_SREV_MAJOR)),
+			       ah->hw_version.phyRev);
+	}
 
-	pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
-	aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
-	pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
+	hw_name[used] = '\0';
 }
+EXPORT_SYMBOL(ath9k_hw_name);
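
The ath9k_hw_extend_tsf() helper added above only receives the low 15 bits of the TSF from the rx descriptor; the full 64-bit value is rebuilt from the current hardware TSF, stepping back one 0x8000 period when the low bits have already wrapped past the stamp. Below is a minimal standalone sketch of the same arithmetic, with the hardware TSF read replaced by a plain parameter; it is an illustration, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Same bit-splicing as ath9k_hw_extend_tsf(); hw_tsf stands in for the
 * value the driver reads with ath9k_hw_gettsf64(). */
static uint64_t extend_tsf(uint64_t hw_tsf, uint32_t rstamp)
{
	if ((hw_tsf & 0x7fff) < rstamp)
		hw_tsf -= 0x8000;	/* low 15 bits wrapped since the frame was received */
	return (hw_tsf & ~0x7fffULL) | rstamp;
}

int main(void)
{
	/* TSF low bits (0x0010) already wrapped past the rx stamp (0x7ff0),
	 * so the result steps back one period: prints 12344fff0 */
	printf("%llx\n",
	       (unsigned long long)extend_tsf(0x123450010ULL, 0x7ff0));
	return 0;
}
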
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b89234571829..e2b0c73a616f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -27,17 +27,24 @@
 #include "calib.h"
 #include "reg.h"
 #include "phy.h"
+#include "btcoex.h"
 
 #include "../regd.h"
+#include "../debug.h"
 
 #define ATHEROS_VENDOR_ID	0x168c
+
 #define AR5416_DEVID_PCI	0x0023
 #define AR5416_DEVID_PCIE	0x0024
 #define AR9160_DEVID_PCI	0x0027
 #define AR9280_DEVID_PCI	0x0029
 #define AR9280_DEVID_PCIE	0x002a
 #define AR9285_DEVID_PCIE	0x002b
+
 #define AR5416_AR9100_DEVID	0x000b
+
+#define AR9271_USB             0x9271
+
 #define	AR_SUBVENDOR_ID_NOG	0x0e11
 #define AR_SUBVENDOR_ID_NEW_A	0x7065
 #define AR5416_MAGIC		0x19641014
@@ -49,9 +56,18 @@
 #define AT9285_COEX3WIRE_SA_SUBSYSID	0x30aa
 #define AT9285_COEX3WIRE_DA_SUBSYSID	0x30ab
 
+#define ATH_AMPDU_LIMIT_MAX        (64 * 1024 - 1)
+
+#define	ATH_DEFAULT_NOISE_FLOOR -95
+
+#define ATH9K_RSSI_BAD			-128
+
 /* Register read/write primitives */
-#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val))
-#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg))
+#define REG_WRITE(_ah, _reg, _val) \
+	ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
+
+#define REG_READ(_ah, _reg) \
+	ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
 
 #define SM(_v, _f)  (((_v) << _f##_S) & _f)
 #define MS(_v, _f)  (((_v) & _f) >> _f##_S)
@@ -91,7 +107,7 @@
 #define AR_GPIO_BIT(_gpio)          (1 << (_gpio))
 
 #define BASE_ACTIVATE_DELAY         100
-#define RTC_PLL_SETTLE_DELAY        1000
+#define RTC_PLL_SETTLE_DELAY        100
 #define COEF_SCALE_S                24
 #define HT40_CHANNEL_CENTER_SHIFT   10
 
@@ -132,12 +148,6 @@ enum wireless_mode {
 	ATH9K_MODE_MAX,
 };
 
-enum ath9k_ant_setting {
-	ATH9K_ANT_VARIABLE = 0,
-	ATH9K_ANT_FIXED_A,
-	ATH9K_ANT_FIXED_B
-};
-
 enum ath9k_hw_caps {
 	ATH9K_HW_CAP_MIC_AESCCM                 = BIT(0),
 	ATH9K_HW_CAP_MIC_CKIP                   = BIT(1),
@@ -201,8 +211,6 @@ struct ath9k_ops_config {
 	u32 cck_trig_high;
 	u32 cck_trig_low;
 	u32 enable_ani;
-	enum ath9k_ant_setting diversity_control;
-	u16 antenna_switch_swap;
 	int serialize_regmode;
 	bool intr_mitigation;
 #define SPUR_DISABLE        	0
@@ -218,6 +226,7 @@ struct ath9k_ops_config {
 #define AR_SPUR_FEEQ_BOUND_HT20 10
 	int spurmode;
 	u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
+	u8 max_txtrig_level;
 };
 
 enum ath9k_int {
@@ -407,7 +416,7 @@ struct ath9k_hw_version {
  * Using a de Bruijn sequence to look up 1's index in a 32 bit number
  * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
  */
-#define debruijn32 0x077CB531UL
+#define debruijn32 0x077CB531U
 
 struct ath_gen_timer_configuration {
 	u32 next_addr;
@@ -433,7 +442,8 @@ struct ath_gen_timer_table {
 };
 
 struct ath_hw {
-	struct ath_softc *ah_sc;
+	struct ieee80211_hw *hw;
+	struct ath_common common;
 	struct ath9k_hw_version hw_version;
 	struct ath9k_ops_config config;
 	struct ath9k_hw_capabilities caps;
@@ -450,7 +460,6 @@ struct ath_hw {
 
 	bool sw_mgmt_crypto;
 	bool is_pciexpress;
-	u8 macaddr[ETH_ALEN];
 	u16 tx_trig_level;
 	u16 rfsilent;
 	u32 rfkill_gpio;
@@ -523,7 +532,14 @@ struct ath_hw {
 		DONT_USE_32KHZ,
 	} enable_32kHz_clock;
 
-	/* RF */
+	/* Callback for radio frequency change */
+	int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+	/* Callback for baseband spur frequency */
+	void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
+					    struct ath9k_channel *chan);
+
+	/* Used to program the radio on non single-chip devices */
 	u32 *analogBank0Data;
 	u32 *analogBank1Data;
 	u32 *analogBank2Data;
@@ -540,7 +556,6 @@ struct ath_hw {
 	u32 acktimeout;
 	u32 ctstimeout;
 	u32 globaltxtimeout;
-	u8 gbeacon_rate;
 
 	/* ANI */
 	u32 proc_phyerr;
@@ -553,8 +568,10 @@ struct ath_hw {
 	int firpwr[5];
 	enum ath9k_ani_cmd ani_function;
 
+	/* Bluetooth coexistence */
+	struct ath_btcoex_hw btcoex_hw;
+
 	u32 intr_txqs;
-	enum ath9k_ht_extprotspacing extprotspacing;
 	u8 txchainmask;
 	u8 rxchainmask;
 
@@ -578,20 +595,32 @@ struct ath_hw {
 	struct ar5416IniArray iniModesAdditional;
 	struct ar5416IniArray iniModesRxGain;
 	struct ar5416IniArray iniModesTxGain;
+	struct ar5416IniArray iniModes_9271_1_0_only;
+	struct ar5416IniArray iniCckfirNormal;
+	struct ar5416IniArray iniCckfirJapan2484;
 
 	u32 intr_gen_timer_trigger;
 	u32 intr_gen_timer_thresh;
 	struct ath_gen_timer_table hw_gen_timers;
 };
 
+static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
+{
+	return &ah->common;
+}
+
+static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
+{
+	return &(ath9k_hw_common(ah)->regulatory);
+}
+
 /* Initialization, Detach, Reset */
 const char *ath9k_hw_probe(u16 vendorid, u16 devid);
 void ath9k_hw_detach(struct ath_hw *ah);
 int ath9k_hw_init(struct ath_hw *ah);
-void ath9k_hw_rf_free(struct ath_hw *ah);
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		   bool bChannelChange);
-void ath9k_hw_fill_cap_info(struct ath_hw *ah);
+int ath9k_hw_fill_cap_info(struct ath_hw *ah);
 bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
 			    u32 capability, u32 *result);
 bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
@@ -613,18 +642,13 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
 u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
-bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
-			       enum ath9k_ant_setting settings,
-			       struct ath9k_channel *chan,
-			       u8 *tx_chainmask, u8 *rx_chainmask,
-			       u8 *antenna_cfgd);
 
 /* General Operation */
 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
 u32 ath9k_hw_reverse_bits(u32 val, u32 n);
 bool ath9k_get_channel_edges(struct ath_hw *ah, u16 flags, u16 *low, u16 *high);
 u16 ath9k_hw_computetxtime(struct ath_hw *ah,
-			   const struct ath_rate_table *rates,
+			   u8 phy, int kbps,
 			   u32 frameLen, u16 rateix, bool shortPreamble);
 void ath9k_hw_get_channel_centers(struct ath_hw *ah,
 				  struct ath9k_channel *chan,
@@ -637,19 +661,21 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
 void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
 void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_softc *sc);
-void ath9k_hw_write_associd(struct ath_softc *sc);
+void ath9k_hw_setbssidmask(struct ath_hw *ah);
+void ath9k_hw_write_associd(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
+u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
 bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah);
 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 				    const struct ath9k_beacon_state *bs);
-bool ath9k_hw_setpower(struct ath_hw *ah,
-		       enum ath9k_power_mode mode);
+
+bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
+
 void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
 
 /* Interrupt Handling */
@@ -663,16 +689,20 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 					  void (*overflow)(void *),
 					  void *arg,
 					  u8 timer_index);
-void ath_gen_timer_start(struct ath_hw *ah, struct ath_gen_timer *timer,
-			 u32 timer_next, u32 timer_period);
-void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
+void ath9k_hw_gen_timer_start(struct ath_hw *ah,
+			      struct ath_gen_timer *timer,
+			      u32 timer_next,
+			      u32 timer_period);
+void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
+
 void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
 void ath_gen_timer_isr(struct ath_hw *hw);
 u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 
+void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+
 #define ATH_PCIE_CAP_LINK_CTRL	0x70
 #define ATH_PCIE_CAP_LINK_L0S	1
 #define ATH_PCIE_CAP_LINK_L1	2
 
-void ath_pcie_aspm_disable(struct ath_softc *sc);
 #endif
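
hw.h keeps the debruijn32 constant (0x077CB531) that the generic-timer code uses to find the index of a set bit: isolate the lowest set bit, multiply by the de Bruijn constant, and the top five bits of the product form a unique pattern that indexes a 32-entry table. The standalone sketch below demonstrates that arithmetic with the textbook table for this constant; the driver keeps its own mapping next to its timer bookkeeping, so table name and layout here are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define DEBRUIJN32 0x077CB531U

/* Textbook index table for this de Bruijn constant with a shift of 27. */
static const uint8_t debruijn_index[32] = {
	 0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
	31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9
};

/* Index of the lowest set bit in a non-zero 32-bit word. */
static unsigned int lowest_set_bit(uint32_t v)
{
	v &= -v;	/* isolate the lowest set bit */
	return debruijn_index[(uint32_t)(v * DEBRUIJN32) >> 27];
}

int main(void)
{
	printf("%u %u\n", lowest_set_bit(0x10), lowest_set_bit(0x80000000u));
	/* prints "4 31" */
	return 0;
}
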
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
index 8622265a030a..8a3bf3ab998d 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/initvals.h
@@ -21,6 +21,8 @@ static const u32 ar5416Modes[][6] = {
     { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
     { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
     { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+    { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
+    { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
     { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
     { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
     { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
@@ -31,11 +33,11 @@ static const u32 ar5416Modes[][6] = {
     { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
     { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
     { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
-    { 0x00009850, 0x6c48b4e0, 0x6c48b4e0, 0x6c48b0de, 0x6c48b0de, 0x6c48b0de },
+    { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
     { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
-    { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
+    { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
     { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
-    { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+    { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
     { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
     { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
     { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
@@ -46,10 +48,10 @@ static const u32 ar5416Modes[][6] = {
     { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
     { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
     { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
-    { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
+    { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
     { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
     { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
-    { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
+    { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
     { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
     { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
     { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -199,7 +201,6 @@ static const u32 ar5416Common[][2] = {
     { 0x00008110, 0x00000168 },
     { 0x00008118, 0x000100aa },
     { 0x0000811c, 0x00003210 },
-    { 0x00008120, 0x08f04800 },
     { 0x00008124, 0x00000000 },
     { 0x00008128, 0x00000000 },
     { 0x0000812c, 0x00000000 },
@@ -215,7 +216,6 @@ static const u32 ar5416Common[][2] = {
     { 0x00008178, 0x00000100 },
     { 0x0000817c, 0x00000000 },
     { 0x000081c4, 0x00000000 },
-    { 0x000081d0, 0x00003210 },
     { 0x000081ec, 0x00000000 },
     { 0x000081f0, 0x00000000 },
     { 0x000081f4, 0x00000000 },
@@ -246,6 +246,7 @@ static const u32 ar5416Common[][2] = {
     { 0x00008258, 0x00000000 },
     { 0x0000825c, 0x400000ff },
     { 0x00008260, 0x00080922 },
+    { 0x00008264, 0xa8000010 },
     { 0x00008270, 0x00000000 },
     { 0x00008274, 0x40000000 },
     { 0x00008278, 0x003e4180 },
@@ -406,9 +407,9 @@ static const u32 ar5416Common[][2] = {
     { 0x0000a25c, 0x0f0f0f01 },
     { 0x0000a260, 0xdfa91f01 },
     { 0x0000a268, 0x00000000 },
-    { 0x0000a26c, 0x0ebae9c6 },
-    { 0x0000b26c, 0x0ebae9c6 },
-    { 0x0000c26c, 0x0ebae9c6 },
+    { 0x0000a26c, 0x0e79e5c6 },
+    { 0x0000b26c, 0x0e79e5c6 },
+    { 0x0000c26c, 0x0e79e5c6 },
     { 0x0000d270, 0x00820820 },
     { 0x0000a278, 0x1ce739ce },
     { 0x0000a27c, 0x051701ce },
@@ -2551,26 +2552,27 @@ static const u32 ar9280Modes_9280_2[][6] = {
     { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
     { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
     { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
-    { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+    { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
     { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
     { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
     { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
     { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e },
     { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
-    { 0x00009850, 0x6c4000e2, 0x6c4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 },
+    { 0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 },
     { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
-    { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x3139605e, 0x31395d5e, 0x31395d5e },
+    { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
     { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
     { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
     { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
     { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
     { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
-    { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+    { 0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016 },
     { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
     { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 },
     { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
     { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
     { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
+    { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
     { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c },
     { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 },
     { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
@@ -2585,8 +2587,10 @@ static const u32 ar9280Modes_9280_2[][6] = {
     { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
     { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
     { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+    { 0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000 },
     { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 },
     { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
+    { 0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000 },
     { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
     { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
 };
@@ -2813,7 +2817,6 @@ static const u32 ar9280Common_9280_2[][2] = {
     { 0x00009958, 0x2108ecff },
     { 0x00009940, 0x14750604 },
     { 0x0000c95c, 0x004b6a8e },
-    { 0x0000c968, 0x000003ce },
     { 0x00009970, 0x190fb515 },
     { 0x00009974, 0x00000000 },
     { 0x00009978, 0x00000001 },
@@ -2849,7 +2852,6 @@ static const u32 ar9280Common_9280_2[][2] = {
     { 0x0000a22c, 0x233f7180 },
     { 0x0000a234, 0x20202020 },
     { 0x0000a238, 0x20202020 },
-    { 0x0000a23c, 0x13c88000 },
     { 0x0000a240, 0x38490a20 },
     { 0x0000a244, 0x00007bb6 },
     { 0x0000a248, 0x0fff3ffc },
@@ -2859,8 +2861,8 @@ static const u32 ar9280Common_9280_2[][2] = {
     { 0x0000a25c, 0x0f0f0f01 },
     { 0x0000a260, 0xdfa91f01 },
     { 0x0000a268, 0x00000000 },
-    { 0x0000a26c, 0x0ebae9c6 },
-    { 0x0000b26c, 0x0ebae9c6 },
+    { 0x0000a26c, 0x0e79e5c6 },
+    { 0x0000b26c, 0x0e79e5c6 },
     { 0x0000d270, 0x00820820 },
     { 0x0000a278, 0x1ce739ce },
     { 0x0000d35c, 0x07ffffef },
@@ -2874,7 +2876,6 @@ static const u32 ar9280Common_9280_2[][2] = {
     { 0x0000d37c, 0x7fffffe2 },
     { 0x0000d380, 0x7f3c7bba },
     { 0x0000d384, 0xf3307ff0 },
-    { 0x0000a388, 0x0c000000 },
     { 0x0000a38c, 0x20202020 },
     { 0x0000a390, 0x20202020 },
     { 0x0000a394, 0x1ce739ce },
@@ -2940,7 +2941,7 @@ static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
     { 0x0000801c, 0x148ec02b, 0x148ec057 },
     { 0x00008318, 0x000044c0, 0x00008980 },
     { 0x00009820, 0x02020200, 0x02020200 },
-    { 0x00009824, 0x00000f0f, 0x00000f0f },
+    { 0x00009824, 0x01000f0f, 0x01000f0f },
     { 0x00009828, 0x0b020001, 0x0b020001 },
     { 0x00009834, 0x00000f0f, 0x00000f0f },
     { 0x00009844, 0x03721821, 0x03721821 },
@@ -3348,6 +3349,8 @@ static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = {
 };
 
 static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
+    { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
+    { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
     { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
     { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 },
     { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 },
@@ -3376,11 +3379,11 @@ static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
     { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 },
     { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
     { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
-    { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
-    { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
 };
 
 static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
+    { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
+    { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
     { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
     { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
     { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
@@ -3409,8 +3412,6 @@ static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
     { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 },
     { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
     { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
-    { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
-    { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
 };
 
 static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
@@ -5918,9 +5919,6 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
     { 0x000099ec, 0x0cc80caa },
     { 0x000099f0, 0x00000000 },
     { 0x000099fc, 0x00001042 },
-    { 0x0000a1f4, 0x00fffeff },
-    { 0x0000a1f8, 0x00f5f9ff },
-    { 0x0000a1fc, 0xb79f6427 },
     { 0x0000a208, 0x803e4788 },
     { 0x0000a210, 0x4080a333 },
     { 0x0000a214, 0x40206c10 },
@@ -5980,7 +5978,7 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
     { 0x0000b3f4, 0x00000000 },
     { 0x0000a7d8, 0x000003f1 },
     { 0x00007800, 0x00000800 },
-    { 0x00007804, 0x6c35ffc2 },
+    { 0x00007804, 0x6c35ffd2 },
     { 0x00007808, 0x6db6c000 },
     { 0x0000780c, 0x6db6cb30 },
     { 0x00007810, 0x6db6cb6c },
@@ -6000,7 +5998,7 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
     { 0x00007848, 0x934934a8 },
     { 0x00007850, 0x00000000 },
     { 0x00007854, 0x00000800 },
-    { 0x00007858, 0x6c35ffc2 },
+    { 0x00007858, 0x6c35ffd2 },
     { 0x0000785c, 0x6db6c000 },
     { 0x00007860, 0x6db6cb30 },
     { 0x00007864, 0x6db6cb6c },
@@ -6027,6 +6025,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
     { 0x000078b8, 0x2a850160 },
 };
 
+/*
+ * For Japanese regulatory requirements, 2484 MHz requires the following three
+ * registers be programmed differently than for the channels between 2412 and 2472 MHz.
+ */
+static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
+    { 0x0000a1f4, 0x00fffeff },
+    { 0x0000a1f8, 0x00f5f9ff },
+    { 0x0000a1fc, 0xb79f6427 },
+};
+
+static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
+    { 0x0000a1f4, 0x00000000 },
+    { 0x0000a1f8, 0xefff0301 },
+    { 0x0000a1fc, 0xca9228ee },
+};
+
 static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
     /* Address      5G-HT20     5G-HT40     2G-HT40     2G-HT20     Turbo   */
     { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -6365,8 +6379,8 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
 };
 
 
-/* AR9271 initialization values automaticaly created: 03/23/09 */
-static const u_int32_t ar9271Modes_9271_1_0[][6] = {
+/* AR9271 initialization values automatically created: 06/04/09 */
+static const u_int32_t ar9271Modes_9271[][6] = {
     { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
     { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
     { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6376,8 +6390,8 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
     { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
     { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
     { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
-    { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
-    { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+    { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
+    { 0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001 },
     { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
     { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
     { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
@@ -6391,6 +6405,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
     { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
     { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
     { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
+    { 0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310 },
     { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
     { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
     { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
@@ -6401,7 +6416,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
     { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
     { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
     { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
-    { 0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329 },
+    { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f },
     { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
     { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
     { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -6690,7 +6705,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
     { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
 };
 
-static const u_int32_t ar9271Common_9271_1_0[][2] = {
+static const u_int32_t ar9271Common_9271[][2] = {
     { 0x0000000c, 0x00000000 },
     { 0x00000030, 0x00020045 },
     { 0x00000034, 0x00000005 },
@@ -6786,7 +6801,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x0000803c, 0x00000000 },
     { 0x00008048, 0x00000000 },
     { 0x00008054, 0x00000000 },
-    { 0x00008058, 0x02000000 },
+    { 0x00008058, 0x00000000 },
     { 0x0000805c, 0x000fc78f },
     { 0x00008060, 0x0000000f },
     { 0x00008064, 0x00000000 },
@@ -6817,7 +6832,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x00008110, 0x00000168 },
     { 0x00008118, 0x000100aa },
     { 0x0000811c, 0x00003210 },
-    { 0x00008120, 0x08f04814 },
+    { 0x00008120, 0x08f04810 },
     { 0x00008124, 0x00000000 },
     { 0x00008128, 0x00000000 },
     { 0x0000812c, 0x00000000 },
@@ -6864,7 +6879,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x00008258, 0x00000000 },
     { 0x0000825c, 0x400000ff },
     { 0x00008260, 0x00080922 },
-    { 0x00008264, 0xa8a00010 },
+    { 0x00008264, 0x88a00010 },
     { 0x00008270, 0x00000000 },
     { 0x00008274, 0x40000000 },
     { 0x00008278, 0x003e4180 },
@@ -6896,7 +6911,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x00007814, 0x924934a8 },
     { 0x0000781c, 0x00000000 },
     { 0x00007820, 0x00000c04 },
-    { 0x00007824, 0x00d86bff },
+    { 0x00007824, 0x00d8abff },
     { 0x00007828, 0x66964300 },
     { 0x0000782c, 0x8db6d961 },
     { 0x00007830, 0x8db6d96c },
@@ -6930,7 +6945,6 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x00009904, 0x00000000 },
     { 0x00009908, 0x00000000 },
     { 0x0000990c, 0x00000000 },
-    { 0x00009910, 0x30002310 },
     { 0x0000991c, 0x10000fff },
     { 0x00009920, 0x04900000 },
     { 0x00009928, 0x00000001 },
@@ -6944,7 +6958,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x00009954, 0x5f3ca3de },
     { 0x00009958, 0x0108ecff },
     { 0x00009968, 0x000003ce },
-    { 0x00009970, 0x192bb515 },
+    { 0x00009970, 0x192bb514 },
     { 0x00009974, 0x00000000 },
     { 0x00009978, 0x00000001 },
     { 0x0000997c, 0x00000000 },
@@ -7031,3 +7045,8 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
     { 0x0000d380, 0x7f3c7bba },
     { 0x0000d384, 0xf3307ff0 },
 };
+
+static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
+    { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
+    { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+};
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 800bfab94635..71b84d91dcff 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -14,16 +14,16 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+#include "hw.h"
 
 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
 					struct ath9k_tx_queue_info *qi)
 {
-	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
-		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
-		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
-		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
-		ah->txurn_interrupt_mask);
+	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
+		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
+		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
+		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
+		  ah->txurn_interrupt_mask);
 
 	REG_WRITE(ah, AR_IMR_S0,
 		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
@@ -39,17 +39,21 @@ u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
 {
 	return REG_READ(ah, AR_QTXDP(q));
 }
+EXPORT_SYMBOL(ath9k_hw_gettxbuf);
 
 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
 {
 	REG_WRITE(ah, AR_QTXDP(q), txdp);
 }
+EXPORT_SYMBOL(ath9k_hw_puttxbuf);
 
 void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
 {
-	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Enable TXE on queue: %u\n", q);
+	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
+		  "Enable TXE on queue: %u\n", q);
 	REG_WRITE(ah, AR_Q_TXE, 1 << q);
 }
+EXPORT_SYMBOL(ath9k_hw_txstart);
 
 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
 {
@@ -64,13 +68,39 @@ u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
 
 	return npend;
 }
+EXPORT_SYMBOL(ath9k_hw_numtxpending);
 
+/**
+ * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
+ *
+ * @ah: atheros hardware struct
+ * @bIncTrigLevel: whether or not the frame trigger level should be updated
+ *
+ * The frame trigger level specifies the minimum number of bytes,
+ * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
+ * before the PCU will initiate sending the frame on the air. This can
+ * mean we initiate transmit before a full frame is on the PCU TX FIFO.
+ * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
+ * first)
+ *
+ * Caution must be taken to set the frame trigger level based on the
+ * DMA request size. For example, if the DMA request size is set to
+ * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
+ * there needs to be enough space in the TX FIFO for the requested transfer
+ * size. Hence the TX FIFO will stop at 512 - 128 = 384 bytes. If we set
+ * the threshold to a value beyond 6, then the transmit will hang.
+ *
+ * Current dual   stream devices have a PCU TX FIFO size of 8 KB.
+ * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
+ * there is a hardware issue which forces us to use 2 KB instead so the
+ * frame trigger level must not exceed 2 KB for these chipsets.
+ */
 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
 {
 	u32 txcfg, curLevel, newLevel;
 	enum ath9k_int omask;
 
-	if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
+	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
 		return false;
 
 	omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
@@ -79,7 +109,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
 	curLevel = MS(txcfg, AR_FTRIG);
 	newLevel = curLevel;
 	if (bIncTrigLevel) {
-		if (curLevel < MAX_TX_FIFO_THRESHOLD)
+		if (curLevel < ah->config.max_txtrig_level)
 			newLevel++;
 	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
 		newLevel--;
@@ -93,27 +123,28 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
 
 	return newLevel != curLevel;
 }
+EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
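
A minimal standalone sketch of the arithmetic described in the comment above; the helper name and the 512-byte FIFO/128-byte burst figures are taken from the example in that comment, everything else (function and variable names) is illustrative and not part of the driver.

#include <stdio.h>

/* Trigger level is expressed in 64-byte units and must leave room
 * for one full DMA burst in the TX FIFO. */
static unsigned int max_trig_level(unsigned int fifo_bytes,
                                   unsigned int dma_request_bytes)
{
        return (fifo_bytes - dma_request_bytes) / 64;
}

int main(void)
{
        /* 512-byte FIFO with 128-byte DMA bursts -> 6 units = 384 bytes. */
        unsigned int level = max_trig_level(512, 128);

        printf("max trigger level: %u units (%u bytes)\n", level, level * 64);
        return 0;
}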
 
 bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
 {
 #define ATH9K_TX_STOP_DMA_TIMEOUT	4000    /* usec */
 #define ATH9K_TIME_QUANTUM		100     /* usec */
-
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath9k_tx_queue_info *qi;
 	u32 tsfLow, j, wait;
 	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
 
 	if (q >= pCap->total_queues) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
-			"invalid queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
+			  "invalid queue: %u\n", q);
 		return false;
 	}
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
-			"inactive queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
+			  "inactive queue: %u\n", q);
 		return false;
 	}
 
@@ -126,9 +157,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
 	}
 
 	if (ath9k_hw_numtxpending(ah, q)) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
-			"%s: Num of pending TX Frames %d on Q %d\n",
-			__func__, ath9k_hw_numtxpending(ah, q), q);
+		ath_print(common, ATH_DBG_QUEUE,
+			  "%s: Num of pending TX Frames %d on Q %d\n",
+			  __func__, ath9k_hw_numtxpending(ah, q), q);
 
 		for (j = 0; j < 2; j++) {
 			tsfLow = REG_READ(ah, AR_TSF_L32);
@@ -142,9 +173,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
 			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
 				break;
 
-			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
-				"TSF has moved while trying to set "
-				"quiet time TSF: 0x%08x\n", tsfLow);
+			ath_print(common, ATH_DBG_QUEUE,
+				  "TSF has moved while trying to set "
+				  "quiet time TSF: 0x%08x\n", tsfLow);
 		}
 
 		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
@@ -155,9 +186,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
 		wait = wait_time;
 		while (ath9k_hw_numtxpending(ah, q)) {
 			if ((--wait) == 0) {
-				DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
-					"Failed to stop TX DMA in 100 "
-					"msec after killing last frame\n");
+				ath_print(common, ATH_DBG_QUEUE,
+					  "Failed to stop TX DMA in 100 "
+					  "msec after killing last frame\n");
 				break;
 			}
 			udelay(ATH9K_TIME_QUANTUM);
@@ -172,6 +203,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
 #undef ATH9K_TX_STOP_DMA_TIMEOUT
 #undef ATH9K_TIME_QUANTUM
 }
+EXPORT_SYMBOL(ath9k_hw_stoptxdma);
 
 void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
 			 u32 segLen, bool firstSeg,
@@ -198,6 +230,7 @@ void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
 	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
 	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
 }
+EXPORT_SYMBOL(ath9k_hw_filltxdesc);
 
 void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
 {
@@ -209,6 +242,7 @@ void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
 	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
 	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
 }
+EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
 
 int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
 {
@@ -222,6 +256,8 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
 	ds->ds_txstat.ts_status = 0;
 	ds->ds_txstat.ts_flags = 0;
 
+	if (ads->ds_txstatus1 & AR_FrmXmitOK)
+		ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
 	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
 		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
 	if (ads->ds_txstatus1 & AR_Filtered)
@@ -284,6 +320,7 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
 
 	return 0;
 }
+EXPORT_SYMBOL(ath9k_hw_txprocdesc);
 
 void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
 			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
@@ -319,6 +356,7 @@ void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
 		ads->ds_ctl11 = 0;
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
 
 void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
 				  struct ath_desc *lastds,
@@ -374,6 +412,7 @@ void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
 	last_ads->ds_ctl2 = ads->ds_ctl2;
 	last_ads->ds_ctl3 = ads->ds_ctl3;
 }
+EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
 
 void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
 				u32 aggrLen)
@@ -384,6 +423,7 @@ void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
 	ads->ds_ctl6 &= ~AR_AggrLen;
 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
 }
+EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
 
 void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
 				 u32 numDelims)
@@ -398,6 +438,7 @@ void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
 	ctl6 |= SM(numDelims, AR_PadDelim);
 	ads->ds_ctl6 = ctl6;
 }
+EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
 
 void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
 {
@@ -407,6 +448,7 @@ void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
 	ads->ds_ctl1 &= ~AR_MoreAggr;
 	ads->ds_ctl6 &= ~AR_PadDelim;
 }
+EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
 
 void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
 {
@@ -414,6 +456,7 @@ void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
 
 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
 }
+EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
 
 void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
 				   u32 burstDuration)
@@ -423,6 +466,7 @@ void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
 	ads->ds_ctl2 &= ~AR_BurstDur;
 	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
 }
+EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
 
 void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
 				     u32 vmf)
@@ -440,28 +484,30 @@ void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
 	*txqs &= ah->intr_txqs;
 	ah->intr_txqs &= ~(*txqs);
 }
+EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
 
 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
 			    const struct ath9k_tx_queue_info *qinfo)
 {
 	u32 cw;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath9k_tx_queue_info *qi;
 
 	if (q >= pCap->total_queues) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
-			"invalid queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
+			  "invalid queue: %u\n", q);
 		return false;
 	}
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
-			"inactive queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
+			  "inactive queue: %u\n", q);
 		return false;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
+	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
 
 	qi->tqi_ver = qinfo->tqi_ver;
 	qi->tqi_subtype = qinfo->tqi_subtype;
@@ -510,23 +556,25 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_set_txq_props);
 
 bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
 			    struct ath9k_tx_queue_info *qinfo)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath9k_tx_queue_info *qi;
 
 	if (q >= pCap->total_queues) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
-			"invalid queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
+			  "invalid queue: %u\n", q);
 		return false;
 	}
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
-			"inactive queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
+			  "inactive queue: %u\n", q);
 		return false;
 	}
 
@@ -547,10 +595,12 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_get_txq_props);
 
 int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
 			  const struct ath9k_tx_queue_info *qinfo)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info *qi;
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	int q;
@@ -574,23 +624,23 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
 			    ATH9K_TX_QUEUE_INACTIVE)
 				break;
 		if (q == pCap->total_queues) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"No available TX queue\n");
+			ath_print(common, ATH_DBG_FATAL,
+				  "No available TX queue\n");
 			return -1;
 		}
 		break;
 	default:
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid TX queue type: %u\n",
-			type);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Invalid TX queue type: %u\n", type);
 		return -1;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
+	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"TX queue: %u already active\n", q);
+		ath_print(common, ATH_DBG_FATAL,
+			  "TX queue: %u already active\n", q);
 		return -1;
 	}
 	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -613,25 +663,27 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
 
 	return q;
 }
+EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
 
 bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info *qi;
 
 	if (q >= pCap->total_queues) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
-			"invalid queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
+			  "invalid queue: %u\n", q);
 		return false;
 	}
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
-			"inactive queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
+			  "inactive queue: %u\n", q);
 		return false;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
+	ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
 
 	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
 	ah->txok_interrupt_mask &= ~(1 << q);
@@ -643,28 +695,30 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
 
 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_channel *chan = ah->curchan;
 	struct ath9k_tx_queue_info *qi;
 	u32 cwMin, chanCwMin, value;
 
 	if (q >= pCap->total_queues) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
-			"invalid queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
+			  "invalid queue: %u\n", q);
 		return false;
 	}
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
-			"inactive queue: %u\n", q);
+		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
+			  "inactive queue: %u\n", q);
 		return true;
 	}
 
-	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
+	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
 
 	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
 		if (chan && IS_CHAN_B(chan))
@@ -799,6 +853,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_resettxqueue);
 
 int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
 			u32 pa, struct ath_desc *nds, u64 tsf)
@@ -880,6 +935,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
 
 	return 0;
 }
+EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
 
 void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
 			  u32 size, u32 flags)
@@ -895,7 +951,15 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
 	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
 		memset(&(ads->u), 0, sizeof(ads->u));
 }
+EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
 
+/*
+ * This can stop or re-enable RX.
+ *
+ * If 'set' is true this will kill any frame which is currently being
+ * transferred between the MAC and the baseband and also prevent any new
+ * frames from getting started.
+ */
 bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
 {
 	u32 reg;
@@ -911,8 +975,9 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
 				     AR_DIAG_RX_ABORT));
 
 			reg = REG_READ(ah, AR_OBS_BUS_1);
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"RX failed to go idle in 10 ms RXSM=0x%x\n", reg);
+			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+				  "RX failed to go idle in 10 ms RXSM=0x%x\n",
+				  reg);
 
 			return false;
 		}
@@ -923,16 +988,19 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
 
 	return true;
 }
+EXPORT_SYMBOL(ath9k_hw_setrxabort);
 
 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
 {
 	REG_WRITE(ah, AR_RXDP, rxdp);
 }
+EXPORT_SYMBOL(ath9k_hw_putrxbuf);
 
 void ath9k_hw_rxena(struct ath_hw *ah)
 {
 	REG_WRITE(ah, AR_CR, AR_CR_RXE);
 }
+EXPORT_SYMBOL(ath9k_hw_rxena);
 
 void ath9k_hw_startpcureceive(struct ath_hw *ah)
 {
@@ -942,6 +1010,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah)
 
 	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
 }
+EXPORT_SYMBOL(ath9k_hw_startpcureceive);
 
 void ath9k_hw_stoppcurecv(struct ath_hw *ah)
 {
@@ -949,12 +1018,13 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
 
 	ath9k_hw_disable_mib_counters(ah);
 }
+EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
 
 bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
 {
 #define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
 #define AH_RX_TIME_QUANTUM     100     /* usec */
-
+	struct ath_common *common = ath9k_hw_common(ah);
 	int i;
 
 	REG_WRITE(ah, AR_CR, AR_CR_RXD);
@@ -967,12 +1037,12 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
 	}
 
 	if (i == 0) {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"DMA failed to stop in %d ms "
-			"AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
-			AH_RX_STOP_DMA_TIMEOUT / 1000,
-			REG_READ(ah, AR_CR),
-			REG_READ(ah, AR_DIAG_SW));
+		ath_print(common, ATH_DBG_FATAL,
+			  "DMA failed to stop in %d ms "
+			  "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+			  AH_RX_STOP_DMA_TIMEOUT / 1000,
+			  REG_READ(ah, AR_CR),
+			  REG_READ(ah, AR_DIAG_SW));
 		return false;
 	} else {
 		return true;
@@ -981,3 +1051,17 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
 #undef AH_RX_TIME_QUANTUM
 #undef AH_RX_STOP_DMA_TIMEOUT
 }
+EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
+
+int ath9k_hw_beaconq_setup(struct ath_hw *ah)
+{
+	struct ath9k_tx_queue_info qi;
+
+	memset(&qi, 0, sizeof(qi));
+	qi.tqi_aifs = 1;
+	qi.tqi_cwmin = 0;
+	qi.tqi_cwmax = 0;
+	/* NB: don't enable any interrupts */
+	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
+}
+EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
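
The new ath9k_hw_beaconq_setup() helper simply wraps ath9k_hw_setuptxqueue() for the beacon queue and returns the allocated queue number, or -1 when no hardware queue is available. A hedged sketch of how a hypothetical caller might consume it; only ath9k_hw_beaconq_setup() comes from this patch, the surrounding function and the stored field name are assumptions for illustration.

/* Hypothetical caller sketch. */
static int example_beacon_init(struct ath_softc *sc)
{
	int qnum = ath9k_hw_beaconq_setup(sc->sc_ah);

	if (qnum == -1)		/* ath9k_hw_setuptxqueue() found no free queue */
		return -EIO;

	/* Remember the hardware queue number; the field name is assumed. */
	sc->beacon.beaconq = qnum;
	return 0;
}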
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index f56e77da6c3e..0c87771383f0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -76,6 +76,7 @@
 #define ATH9K_TXERR_FIFO           0x04
 #define ATH9K_TXERR_XTXOP          0x08
 #define ATH9K_TXERR_TIMER_EXPIRED  0x10
+#define ATH9K_TX_ACKED		   0x20
 
 #define ATH9K_TX_BA                0x01
 #define ATH9K_TX_PWRMGMT           0x02
@@ -85,9 +86,15 @@
 #define ATH9K_TX_SW_ABORTED        0x40
 #define ATH9K_TX_SW_FILTERED       0x80
 
+/* 64 bytes */
 #define MIN_TX_FIFO_THRESHOLD   0x1
+
+/*
+ * Single stream devices AR9285 and AR9271 require 2 KB
+ * to work around a hardware issue; all other devices
+ * can use the full 4 KB limit.
+ */
 #define MAX_TX_FIFO_THRESHOLD   ((4096 / 64) - 1)
-#define INIT_TX_FIFO_THRESHOLD  MIN_TX_FIFO_THRESHOLD
 
 struct ath_tx_status {
 	u32 ts_tstamp;
@@ -380,6 +387,11 @@ struct ar5416_desc {
 #define AR_TxBaStatus       0x40000000
 #define AR_TxStatusRsvd01   0x80000000
 
+/*
+ * AR_FrmXmitOK - Frame transmission success flag. If set, the frame was
+ * transmitted successfully. If clear, no ACK or BA was received to indicate
+ * successful transmission when we were expecting an ACK or BA.
+ */
 #define AR_FrmXmitOK            0x00000001
 #define AR_ExcessiveRetries     0x00000002
 #define AR_FIFOUnderrun         0x00000004
@@ -614,19 +626,8 @@ enum ath9k_cipher {
 	ATH9K_CIPHER_MIC = 127
 };
 
-enum ath9k_ht_macmode {
-	ATH9K_HT_MACMODE_20 = 0,
-	ATH9K_HT_MACMODE_2040 = 1,
-};
-
-enum ath9k_ht_extprotspacing {
-	ATH9K_HT_EXTPROTSPACING_20 = 0,
-	ATH9K_HT_EXTPROTSPACING_25 = 1,
-};
-
 struct ath_hw;
 struct ath9k_channel;
-struct ath_rate_table;
 
 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
@@ -677,5 +678,6 @@ void ath9k_hw_rxena(struct ath_hw *ah);
 void ath9k_hw_startpcureceive(struct ath_hw *ah);
 void ath9k_hw_stoppcurecv(struct ath_hw *ah);
 bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
+int ath9k_hw_beaconq_setup(struct ath_hw *ah);
 
 #endif /* MAC_H */
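
The new ATH9K_TX_ACKED status bit is set in ts_status when the descriptor reports AR_FrmXmitOK (see the mac.c hunk above). A minimal sketch of how TX-completion code could test it; ATH9K_TX_ACKED, ATH9K_TXERR_XRETRY and ath9k_hw_txprocdesc() are from the patch, the surrounding function and the return-value check are illustrative assumptions.

/* Hypothetical completion check. */
static void example_tx_complete(struct ath_hw *ah, struct ath_desc *ds)
{
	if (ath9k_hw_txprocdesc(ah, ds) != 0)
		return;	/* assumed: non-zero means the descriptor is not done yet */

	if (ds->ds_txstat.ts_status & ATH9K_TX_ACKED) {
		/* AR_FrmXmitOK was set: the peer ACKed (or block-ACKed) the frame. */
	} else if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) {
		/* Excessive retries: the hardware gave up on the frame. */
	}
}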
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 43d2be9867fc..c48743452515 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -16,6 +16,7 @@
 
 #include <linux/nl80211.h>
 #include "ath9k.h"
+#include "btcoex.h"
 
 static char *dev_info = "ath9k";
 
@@ -28,6 +29,10 @@ static int modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
 
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
 /* We use the hw_value as an index into our private channel structure */
 
 #define CHAN2G(_freq, _idx)  { \
@@ -99,37 +104,55 @@ static struct ieee80211_channel ath9k_5ghz_chantable[] = {
 	CHAN5G(5825, 37), /* Channel 165 */
 };
 
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) {              \
+	.bitrate        = (_bitrate),                   \
+	.flags          = (_flags),                     \
+	.hw_value       = (_hw_rate),                   \
+	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+	RATE(10, 0x1b, 0),
+	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATE(60, 0x0b, 0),
+	RATE(90, 0x0f, 0),
+	RATE(120, 0x0a, 0),
+	RATE(180, 0x0e, 0),
+	RATE(240, 0x09, 0),
+	RATE(360, 0x0d, 0),
+	RATE(480, 0x08, 0),
+	RATE(540, 0x0c, 0),
+};
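
The SHPCHECK() macro above derives the short-preamble hardware rate code by ORing 0x04 into the CCK rate code whenever the rate advertises IEEE80211_RATE_SHORT_PREAMBLE. A small standalone sketch of that expansion for the 2 Mbps entry; the flag constant is a stand-in for the mac80211 flag, the arithmetic follows directly from the macro definition.

#include <stdio.h>

#define SHORT_PREAMBLE_FLAG 0x1	/* stand-in for IEEE80211_RATE_SHORT_PREAMBLE */
#define SHPCHECK(hw_rate, flags) \
	(((flags) & SHORT_PREAMBLE_FLAG) ? ((hw_rate) | 0x04) : 0)

int main(void)
{
	/* 2 Mbps CCK: hw_value 0x1a, short-preamble variant 0x1e. */
	printf("hw_value=0x%02x hw_value_short=0x%02x\n",
	       0x1a, SHPCHECK(0x1a, SHORT_PREAMBLE_FLAG));
	return 0;
}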
+
 static void ath_cache_conf_rate(struct ath_softc *sc,
 				struct ieee80211_conf *conf)
 {
 	switch (conf->channel->band) {
 	case IEEE80211_BAND_2GHZ:
 		if (conf_is_ht20(conf))
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
+			sc->cur_rate_mode = ATH9K_MODE_11NG_HT20;
 		else if (conf_is_ht40_minus(conf))
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
+			sc->cur_rate_mode = ATH9K_MODE_11NG_HT40MINUS;
 		else if (conf_is_ht40_plus(conf))
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
+			sc->cur_rate_mode = ATH9K_MODE_11NG_HT40PLUS;
 		else
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11G];
+			sc->cur_rate_mode = ATH9K_MODE_11G;
 		break;
 	case IEEE80211_BAND_5GHZ:
 		if (conf_is_ht20(conf))
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
+			sc->cur_rate_mode = ATH9K_MODE_11NA_HT20;
 		else if (conf_is_ht40_minus(conf))
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
+			sc->cur_rate_mode = ATH9K_MODE_11NA_HT40MINUS;
 		else if (conf_is_ht40_plus(conf))
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
+			sc->cur_rate_mode = ATH9K_MODE_11NA_HT40PLUS;
 		else
-			sc->cur_rate_table =
-			  sc->hw_rate_table[ATH9K_MODE_11A];
+			sc->cur_rate_mode = ATH9K_MODE_11A;
 		break;
 	default:
 		BUG_ON(1);
@@ -185,50 +208,6 @@ static u8 parse_mpdudensity(u8 mpdudensity)
 	}
 }
 
-static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
-{
-	const struct ath_rate_table *rate_table = NULL;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_rate *rate;
-	int i, maxrates;
-
-	switch (band) {
-	case IEEE80211_BAND_2GHZ:
-		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
-		break;
-	case IEEE80211_BAND_5GHZ:
-		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
-		break;
-	default:
-		break;
-	}
-
-	if (rate_table == NULL)
-		return;
-
-	sband = &sc->sbands[band];
-	rate = sc->rates[band];
-
-	if (rate_table->rate_cnt > ATH_RATE_MAX)
-		maxrates = ATH_RATE_MAX;
-	else
-		maxrates = rate_table->rate_cnt;
-
-	for (i = 0; i < maxrates; i++) {
-		rate[i].bitrate = rate_table->info[i].ratekbps / 100;
-		rate[i].hw_value = rate_table->info[i].ratecode;
-		if (rate_table->info[i].short_preamble) {
-			rate[i].hw_value_short = rate_table->info[i].ratecode |
-				rate_table->info[i].short_preamble;
-			rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
-		}
-		sband->n_bitrates++;
-
-		DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
-			rate[i].bitrate / 10, rate[i].hw_value);
-	}
-}
-
 static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
 						struct ieee80211_hw *hw)
 {
@@ -242,6 +221,51 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
 	return channel;
 }
 
+static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&sc->sc_pm_lock, flags);
+	ret = ath9k_hw_setpower(sc->sc_ah, mode);
+	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+	return ret;
+}
+
+void ath9k_ps_wakeup(struct ath_softc *sc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->sc_pm_lock, flags);
+	if (++sc->ps_usecount != 1)
+		goto unlock;
+
+	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
+
+ unlock:
+	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
+void ath9k_ps_restore(struct ath_softc *sc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->sc_pm_lock, flags);
+	if (--sc->ps_usecount != 0)
+		goto unlock;
+
+	if (sc->ps_enabled &&
+	    !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+			      SC_OP_WAIT_FOR_CAB |
+			      SC_OP_WAIT_FOR_PSPOLL_DATA |
+			      SC_OP_WAIT_FOR_TX_ACK)))
+		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
+
+ unlock:
+	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
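
ath9k_ps_wakeup() and ath9k_ps_restore() above implement a reference-counted power-save gate: the first wakeup forces the chip awake, and only when the last caller restores may it drop back to network sleep. A hedged usage sketch; the function body is a placeholder and not from the patch.

/* Illustrative only: every ath9k_ps_wakeup() must be paired with a restore. */
static void example_hw_access(struct ath_softc *sc)
{
	ath9k_ps_wakeup(sc);	/* refcount 0 -> 1 forces ATH9K_PM_AWAKE */

	/* ... touch registers or issue commands while the chip is awake ... */

	ath9k_ps_restore(sc);	/* refcount back to 0 may re-enter network sleep */
}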
 /*
  * Set/change channels.  If the channel is really being changed, it's done
  * by resetting the chip.  To accomplish this we must first clean up any pending
@@ -251,6 +275,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		    struct ath9k_channel *hchan)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ieee80211_conf *conf = &common->hw->conf;
 	bool fastcc = true, stopped;
 	struct ieee80211_channel *channel = hw->conf.channel;
 	int r;
@@ -280,19 +306,19 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 	if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
 		fastcc = false;
 
-	DPRINTF(sc, ATH_DBG_CONFIG,
-		"(%u MHz) -> (%u MHz), chanwidth: %d\n",
-		sc->sc_ah->curchan->channel,
-		channel->center_freq, sc->tx_chan_width);
+	ath_print(common, ATH_DBG_CONFIG,
+		  "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n",
+		  sc->sc_ah->curchan->channel,
+		  channel->center_freq, conf_is_ht40(conf));
 
 	spin_lock_bh(&sc->sc_resetlock);
 
 	r = ath9k_hw_reset(ah, hchan, fastcc);
 	if (r) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to reset channel (%u Mhz) "
-			"reset status %d\n",
-			channel->center_freq, r);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to reset channel (%u Mhz) "
+			  "reset status %d\n",
+			  channel->center_freq, r);
 		spin_unlock_bh(&sc->sc_resetlock);
 		goto ps_restore;
 	}
@@ -301,8 +327,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 	sc->sc_flags &= ~SC_OP_FULL_RESET;
 
 	if (ath_startrecv(sc) != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to restart recv logic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to restart recv logic\n");
 		r = -EIO;
 		goto ps_restore;
 	}
@@ -327,6 +353,7 @@ static void ath_ani_calibrate(unsigned long data)
 {
 	struct ath_softc *sc = (struct ath_softc *)data;
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	bool longcal = false;
 	bool shortcal = false;
 	bool aniflag = false;
@@ -351,33 +378,34 @@ static void ath_ani_calibrate(unsigned long data)
 	ath9k_ps_wakeup(sc);
 
 	/* Long calibration runs independently of short calibration. */
-	if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
+	if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
 		longcal = true;
-		DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
-		sc->ani.longcal_timer = timestamp;
+		ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+		common->ani.longcal_timer = timestamp;
 	}
 
 	/* Short calibration applies only while caldone is false */
-	if (!sc->ani.caldone) {
-		if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) {
+	if (!common->ani.caldone) {
+		if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
 			shortcal = true;
-			DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
-			sc->ani.shortcal_timer = timestamp;
-			sc->ani.resetcal_timer = timestamp;
+			ath_print(common, ATH_DBG_ANI,
+				  "shortcal @%lu\n", jiffies);
+			common->ani.shortcal_timer = timestamp;
+			common->ani.resetcal_timer = timestamp;
 		}
 	} else {
-		if ((timestamp - sc->ani.resetcal_timer) >=
+		if ((timestamp - common->ani.resetcal_timer) >=
 		    ATH_RESTART_CALINTERVAL) {
-			sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
-			if (sc->ani.caldone)
-				sc->ani.resetcal_timer = timestamp;
+			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+			if (common->ani.caldone)
+				common->ani.resetcal_timer = timestamp;
 		}
 	}
 
 	/* Verify whether we must check ANI */
-	if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+	if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
 		aniflag = true;
-		sc->ani.checkani_timer = timestamp;
+		common->ani.checkani_timer = timestamp;
 	}
 
 	/* Skip all processing if there's nothing to do. */
@@ -388,16 +416,21 @@ static void ath_ani_calibrate(unsigned long data)
 
 		/* Perform calibration if necessary */
 		if (longcal || shortcal) {
-			sc->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan,
-						     sc->rx_chainmask, longcal);
+			common->ani.caldone =
+				ath9k_hw_calibrate(ah,
+						   ah->curchan,
+						   common->rx_chainmask,
+						   longcal);
 
 			if (longcal)
-				sc->ani.noise_floor = ath9k_hw_getchan_noise(ah,
+				common->ani.noise_floor = ath9k_hw_getchan_noise(ah,
 								     ah->curchan);
 
-			DPRINTF(sc, ATH_DBG_ANI," calibrate chan %u/%x nf: %d\n",
-				ah->curchan->channel, ah->curchan->channelFlags,
-				sc->ani.noise_floor);
+			ath_print(common, ATH_DBG_ANI,
+				  " calibrate chan %u/%x nf: %d\n",
+				  ah->curchan->channel,
+				  ah->curchan->channelFlags,
+				  common->ani.noise_floor);
 		}
 	}
 
@@ -413,21 +446,21 @@ set_timer:
 	cal_interval = ATH_LONG_CALINTERVAL;
 	if (sc->sc_ah->config.enable_ani)
 		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
-	if (!sc->ani.caldone)
+	if (!common->ani.caldone)
 		cal_interval = min(cal_interval, (u32)short_cal_interval);
 
-	mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
 }
 
-static void ath_start_ani(struct ath_softc *sc)
+static void ath_start_ani(struct ath_common *common)
 {
 	unsigned long timestamp = jiffies_to_msecs(jiffies);
 
-	sc->ani.longcal_timer = timestamp;
-	sc->ani.shortcal_timer = timestamp;
-	sc->ani.checkani_timer = timestamp;
+	common->ani.longcal_timer = timestamp;
+	common->ani.shortcal_timer = timestamp;
+	common->ani.checkani_timer = timestamp;
 
-	mod_timer(&sc->ani.timer,
+	mod_timer(&common->ani.timer,
 		  jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
 }
 
@@ -439,17 +472,22 @@ static void ath_start_ani(struct ath_softc *sc)
  */
 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
 {
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
 	if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
-	    (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE)) {
-		sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
-		sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
+	    (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
+		common->tx_chainmask = ah->caps.tx_chainmask;
+		common->rx_chainmask = ah->caps.rx_chainmask;
 	} else {
-		sc->tx_chainmask = 1;
-		sc->rx_chainmask = 1;
+		common->tx_chainmask = 1;
+		common->rx_chainmask = 1;
 	}
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
-		sc->tx_chainmask, sc->rx_chainmask);
+	ath_print(common, ATH_DBG_CONFIG,
+		  "tx chmask: %d, rx chmask: %d\n",
+		  common->tx_chainmask,
+		  common->rx_chainmask);
 }
 
 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -478,6 +516,9 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 static void ath9k_tasklet(unsigned long data)
 {
 	struct ath_softc *sc = (struct ath_softc *)data;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
 	u32 status = sc->intrstatus;
 
 	ath9k_ps_wakeup(sc);
@@ -502,16 +543,17 @@ static void ath9k_tasklet(unsigned long data)
 		 * TSF sync does not look correct; remain awake to sync with
 		 * the next Beacon.
 		 */
-		DPRINTF(sc, ATH_DBG_PS, "TSFOOR - Sync with next Beacon\n");
+		ath_print(common, ATH_DBG_PS,
+			  "TSFOOR - Sync with next Beacon\n");
 		sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
 	}
 
-	if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
+	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
 		if (status & ATH9K_INT_GENTIMER)
 			ath_gen_timer_isr(sc->sc_ah);
 
 	/* re-enable hardware interrupt */
-	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+	ath9k_hw_set_interrupts(ah, sc->imask);
 	ath9k_ps_restore(sc);
 }
 
@@ -602,7 +644,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 		if (status & ATH9K_INT_TIM_TIMER) {
 			/* Clear RxAbort bit so that we can
 			 * receive frames */
-			ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
+			ath9k_setpower(sc, ATH9K_PM_AWAKE);
 			ath9k_hw_setrxabort(sc->sc_ah, 0);
 			sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
 		}
@@ -664,10 +706,11 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
 	return chanmode;
 }
 
-static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
+static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
 			   struct ath9k_keyval *hk, const u8 *addr,
 			   bool authenticator)
 {
+	struct ath_hw *ah = common->ah;
 	const u8 *key_rxmic;
 	const u8 *key_txmic;
 
@@ -687,42 +730,42 @@ static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
 			memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
 			memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
 		}
-		return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
+		return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
 	}
-	if (!sc->splitmic) {
+	if (!common->splitmic) {
 		/* TX and RX keys share the same key cache entry. */
 		memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
 		memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
-		return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
+		return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
 	}
 
 	/* Separate key cache entries for TX and RX */
 
 	/* TX key goes at first index, RX key at +32. */
 	memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
-	if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) {
+	if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
 		/* TX MIC entry failed. No need to proceed further */
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Setting TX MIC Key Failed\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Setting TX MIC Key Failed\n");
 		return 0;
 	}
 
 	memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
 	/* XXX delete tx key on failure? */
-	return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix + 32, hk, addr);
+	return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
 }
 
-static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
+static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
 {
 	int i;
 
-	for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
-		if (test_bit(i, sc->keymap) ||
-		    test_bit(i + 64, sc->keymap))
+	for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+		if (test_bit(i, common->keymap) ||
+		    test_bit(i + 64, common->keymap))
 			continue; /* At least one part of TKIP key allocated */
-		if (sc->splitmic &&
-		    (test_bit(i + 32, sc->keymap) ||
-		     test_bit(i + 64 + 32, sc->keymap)))
+		if (common->splitmic &&
+		    (test_bit(i + 32, common->keymap) ||
+		     test_bit(i + 64 + 32, common->keymap)))
 			continue; /* At least one part of TKIP key allocated */
 
 		/* Found a free slot for a TKIP key */
@@ -731,60 +774,60 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
 	return -1;
 }
 
-static int ath_reserve_key_cache_slot(struct ath_softc *sc)
+static int ath_reserve_key_cache_slot(struct ath_common *common)
 {
 	int i;
 
 	/* First, try to find slots that would not be available for TKIP. */
-	if (sc->splitmic) {
-		for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
-			if (!test_bit(i, sc->keymap) &&
-			    (test_bit(i + 32, sc->keymap) ||
-			     test_bit(i + 64, sc->keymap) ||
-			     test_bit(i + 64 + 32, sc->keymap)))
+	if (common->splitmic) {
+		for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
+			if (!test_bit(i, common->keymap) &&
+			    (test_bit(i + 32, common->keymap) ||
+			     test_bit(i + 64, common->keymap) ||
+			     test_bit(i + 64 + 32, common->keymap)))
 				return i;
-			if (!test_bit(i + 32, sc->keymap) &&
-			    (test_bit(i, sc->keymap) ||
-			     test_bit(i + 64, sc->keymap) ||
-			     test_bit(i + 64 + 32, sc->keymap)))
+			if (!test_bit(i + 32, common->keymap) &&
+			    (test_bit(i, common->keymap) ||
+			     test_bit(i + 64, common->keymap) ||
+			     test_bit(i + 64 + 32, common->keymap)))
 				return i + 32;
-			if (!test_bit(i + 64, sc->keymap) &&
-			    (test_bit(i , sc->keymap) ||
-			     test_bit(i + 32, sc->keymap) ||
-			     test_bit(i + 64 + 32, sc->keymap)))
+			if (!test_bit(i + 64, common->keymap) &&
+			    (test_bit(i , common->keymap) ||
+			     test_bit(i + 32, common->keymap) ||
+			     test_bit(i + 64 + 32, common->keymap)))
 				return i + 64;
-			if (!test_bit(i + 64 + 32, sc->keymap) &&
-			    (test_bit(i, sc->keymap) ||
-			     test_bit(i + 32, sc->keymap) ||
-			     test_bit(i + 64, sc->keymap)))
+			if (!test_bit(i + 64 + 32, common->keymap) &&
+			    (test_bit(i, common->keymap) ||
+			     test_bit(i + 32, common->keymap) ||
+			     test_bit(i + 64, common->keymap)))
 				return i + 64 + 32;
 		}
 	} else {
-		for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
-			if (!test_bit(i, sc->keymap) &&
-			    test_bit(i + 64, sc->keymap))
+		for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+			if (!test_bit(i, common->keymap) &&
+			    test_bit(i + 64, common->keymap))
 				return i;
-			if (test_bit(i, sc->keymap) &&
-			    !test_bit(i + 64, sc->keymap))
+			if (test_bit(i, common->keymap) &&
+			    !test_bit(i + 64, common->keymap))
 				return i + 64;
 		}
 	}
 
 	/* No partially used TKIP slots, pick any available slot */
-	for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
+	for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
 		/* Do not allow slots that could be needed for TKIP group keys
 		 * to be used. This limitation could be removed if we know that
 		 * TKIP will not be used. */
 		if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
 			continue;
-		if (sc->splitmic) {
+		if (common->splitmic) {
 			if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
 				continue;
 			if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
 				continue;
 		}
 
-		if (!test_bit(i, sc->keymap))
+		if (!test_bit(i, common->keymap))
 			return i; /* Found a free slot for a key */
 	}
 
@@ -792,11 +835,12 @@ static int ath_reserve_key_cache_slot(struct ath_softc *sc)
 	return -1;
 }
 
-static int ath_key_config(struct ath_softc *sc,
+static int ath_key_config(struct ath_common *common,
 			  struct ieee80211_vif *vif,
 			  struct ieee80211_sta *sta,
 			  struct ieee80211_key_conf *key)
 {
+	struct ath_hw *ah = common->ah;
 	struct ath9k_keyval hk;
 	const u8 *mac = NULL;
 	int ret = 0;
@@ -842,54 +886,57 @@ static int ath_key_config(struct ath_softc *sc,
 		mac = sta->addr;
 
 		if (key->alg == ALG_TKIP)
-			idx = ath_reserve_key_cache_slot_tkip(sc);
+			idx = ath_reserve_key_cache_slot_tkip(common);
 		else
-			idx = ath_reserve_key_cache_slot(sc);
+			idx = ath_reserve_key_cache_slot(common);
 		if (idx < 0)
 			return -ENOSPC; /* no free key cache entries */
 	}
 
 	if (key->alg == ALG_TKIP)
-		ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac,
+		ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
 				      vif->type == NL80211_IFTYPE_AP);
 	else
-		ret = ath9k_hw_set_keycache_entry(sc->sc_ah, idx, &hk, mac);
+		ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
 
 	if (!ret)
 		return -EIO;
 
-	set_bit(idx, sc->keymap);
+	set_bit(idx, common->keymap);
 	if (key->alg == ALG_TKIP) {
-		set_bit(idx + 64, sc->keymap);
-		if (sc->splitmic) {
-			set_bit(idx + 32, sc->keymap);
-			set_bit(idx + 64 + 32, sc->keymap);
+		set_bit(idx + 64, common->keymap);
+		if (common->splitmic) {
+			set_bit(idx + 32, common->keymap);
+			set_bit(idx + 64 + 32, common->keymap);
 		}
 	}
 
 	return idx;
 }
 
-static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
+static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
 {
-	ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
+	struct ath_hw *ah = common->ah;
+
+	ath9k_hw_keyreset(ah, key->hw_key_idx);
 	if (key->hw_key_idx < IEEE80211_WEP_NKID)
 		return;
 
-	clear_bit(key->hw_key_idx, sc->keymap);
+	clear_bit(key->hw_key_idx, common->keymap);
 	if (key->alg != ALG_TKIP)
 		return;
 
-	clear_bit(key->hw_key_idx + 64, sc->keymap);
-	if (sc->splitmic) {
-		clear_bit(key->hw_key_idx + 32, sc->keymap);
-		clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
+	clear_bit(key->hw_key_idx + 64, common->keymap);
+	if (common->splitmic) {
+		clear_bit(key->hw_key_idx + 32, common->keymap);
+		clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
 	}
 }
 
 static void setup_ht_cap(struct ath_softc *sc,
 			 struct ieee80211_sta_ht_cap *ht_info)
 {
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	u8 tx_streams, rx_streams;
 
 	ht_info->ht_supported = true;
@@ -903,12 +950,15 @@ static void setup_ht_cap(struct ath_softc *sc,
 
 	/* set up supported mcs set */
 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-	tx_streams = !(sc->tx_chainmask & (sc->tx_chainmask - 1)) ? 1 : 2;
-	rx_streams = !(sc->rx_chainmask & (sc->rx_chainmask - 1)) ? 1 : 2;
+	tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
+		     1 : 2;
+	rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
+		     1 : 2;
 
 	if (tx_streams != rx_streams) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "TX streams %d, RX streams: %d\n",
-			tx_streams, rx_streams);
+		ath_print(common, ATH_DBG_CONFIG,
+			  "TX streams %d, RX streams: %d\n",
+			  tx_streams, rx_streams);
 		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
 		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
 				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
@@ -925,14 +975,17 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_bss_conf *bss_conf)
 {
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (bss_conf->assoc) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
-			bss_conf->aid, sc->curbssid);
+		ath_print(common, ATH_DBG_CONFIG,
+			  "Bss Info ASSOC %d, bssid: %pM\n",
+			   bss_conf->aid, common->curbssid);
 
 		/* New association, store aid */
-		sc->curaid = bss_conf->aid;
-		ath9k_hw_write_associd(sc);
+		common->curaid = bss_conf->aid;
+		ath9k_hw_write_associd(ah);
 
 		/*
 		 * Request a re-configuration of Beacon related timers
@@ -947,12 +1000,12 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
 		/* Reset rssi stats */
 		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
-		ath_start_ani(sc);
+		ath_start_ani(common);
 	} else {
-		DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
-		sc->curaid = 0;
+		ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
+		common->curaid = 0;
 		/* Stop ANI */
-		del_timer_sync(&sc->ani.timer);
+		del_timer_sync(&common->ani.timer);
 	}
 }
 
@@ -1042,8 +1095,8 @@ static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
 
 	ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
 	if (ret)
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Failed to register led:%s", led->name);
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "Failed to register led:%s", led->name);
 	else
 		led->registered = 1;
 	return ret;
@@ -1124,10 +1177,11 @@ fail:
 	ath_deinit_leds(sc);
 }
 
-void ath_radio_enable(struct ath_softc *sc)
+void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
 	struct ath_hw *ah = sc->sc_ah;
-	struct ieee80211_channel *channel = sc->hw->conf.channel;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ieee80211_channel *channel = hw->conf.channel;
 	int r;
 
 	ath9k_ps_wakeup(sc);
@@ -1139,17 +1193,17 @@ void ath_radio_enable(struct ath_softc *sc)
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, ah->curchan, false);
 	if (r) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to reset channel %u (%uMhz) ",
-			"reset status %d\n",
-			channel->center_freq, r);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to reset channel %u (%uMhz) ",
+			  "reset status %d\n",
+			  channel->center_freq, r);
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
 
 	ath_update_txpow(sc);
 	if (ath_startrecv(sc) != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to restart recv logic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to restart recv logic\n");
 		return;
 	}
 
@@ -1164,18 +1218,18 @@ void ath_radio_enable(struct ath_softc *sc)
 			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 	ath9k_hw_set_gpio(ah, ah->led_pin, 0);
 
-	ieee80211_wake_queues(sc->hw);
+	ieee80211_wake_queues(hw);
 	ath9k_ps_restore(sc);
 }
 
-void ath_radio_disable(struct ath_softc *sc)
+void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
 	struct ath_hw *ah = sc->sc_ah;
-	struct ieee80211_channel *channel = sc->hw->conf.channel;
+	struct ieee80211_channel *channel = hw->conf.channel;
 	int r;
 
 	ath9k_ps_wakeup(sc);
-	ieee80211_stop_queues(sc->hw);
+	ieee80211_stop_queues(hw);
 
 	/* Disable LED */
 	ath9k_hw_set_gpio(ah, ah->led_pin, 1);
@@ -1189,22 +1243,22 @@ void ath_radio_disable(struct ath_softc *sc)
 	ath_flushrecv(sc);		/* flush recv queue */
 
 	if (!ah->curchan)
-		ah->curchan = ath_get_curchannel(sc, sc->hw);
+		ah->curchan = ath_get_curchannel(sc, hw);
 
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, ah->curchan, false);
 	if (r) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to reset channel %u (%uMhz) "
-			"reset status %d\n",
-			channel->center_freq, r);
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "Unable to reset channel %u (%uMhz) "
+			  "reset status %d\n",
+			  channel->center_freq, r);
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
 
 	ath9k_hw_phy_disable(ah);
 	ath9k_hw_configpcipowersave(ah, 1, 1);
 	ath9k_ps_restore(sc);
-	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
+	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 }
 
 /*******************/
@@ -1236,23 +1290,26 @@ static void ath_start_rfkill_poll(struct ath_softc *sc)
 		wiphy_rfkill_start_polling(sc->hw->wiphy);
 }
 
-void ath_cleanup(struct ath_softc *sc)
+static void ath9k_uninit_hw(struct ath_softc *sc)
 {
-	ath_detach(sc);
-	free_irq(sc->irq, sc);
-	ath_bus_cleanup(sc);
-	kfree(sc->sec_wiphy);
-	ieee80211_free_hw(sc->hw);
+	struct ath_hw *ah = sc->sc_ah;
+
+	BUG_ON(!ah);
+
+	ath9k_exit_debug(ah);
+	ath9k_hw_detach(ah);
+	sc->sc_ah = NULL;
 }
 
-void ath_detach(struct ath_softc *sc)
+static void ath_clean_core(struct ath_softc *sc)
 {
 	struct ieee80211_hw *hw = sc->hw;
+	struct ath_hw *ah = sc->sc_ah;
 	int i = 0;
 
 	ath9k_ps_wakeup(sc);
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
+	dev_dbg(sc->dev, "Detach ATH hw\n");
 
 	ath_deinit_leds(sc);
 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
@@ -1273,20 +1330,36 @@ void ath_detach(struct ath_softc *sc)
 	tasklet_kill(&sc->bcon_tasklet);
 
 	if (!(sc->sc_flags & SC_OP_INVALID))
-		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
+		ath9k_setpower(sc, ATH9K_PM_AWAKE);
 
 	/* cleanup tx queues */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i))
 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 
-	if ((sc->btcoex_info.no_stomp_timer) &&
-	    sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
-		ath_gen_timer_free(sc->sc_ah, sc->btcoex_info.no_stomp_timer);
+	if ((sc->btcoex.no_stomp_timer) &&
+	    ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+		ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
+}
 
-	ath9k_hw_detach(sc->sc_ah);
-	sc->sc_ah = NULL;
-	ath9k_exit_debug(sc);
+void ath_detach(struct ath_softc *sc)
+{
+	ath_clean_core(sc);
+	ath9k_uninit_hw(sc);
+}
+
+void ath_cleanup(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	ath_clean_core(sc);
+	free_irq(sc->irq, sc);
+	ath_bus_cleanup(common);
+	kfree(sc->sec_wiphy);
+	ieee80211_free_hw(sc->hw);
+
+	ath9k_uninit_hw(sc);
 }
 
 static int ath9k_reg_notifier(struct wiphy *wiphy,
@@ -1295,29 +1368,245 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
-	struct ath_regulatory *reg = &sc->common.regulatory;
+	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
 
 	return ath_reg_notifier_apply(wiphy, request, reg);
 }
 
 /*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_hw *ah = sc->sc_ah;
+
+	if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+		btcoex->bt_priority_cnt++;
+
+	if (time_after(jiffies, btcoex->bt_priority_time +
+			msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+		if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+				  "BT priority traffic detected");
+			sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+		} else {
+			sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
+		}
+
+		btcoex->bt_priority_cnt = 0;
+		btcoex->bt_priority_time = jiffies;
+	}
+}
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
+				  enum ath_stomp_type stomp_type)
+{
+	struct ath_hw *ah = sc->sc_ah;
+
+	switch (stomp_type) {
+	case ATH_BTCOEX_STOMP_ALL:
+		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+					   AR_STOMP_ALL_WLAN_WGHT);
+		break;
+	case ATH_BTCOEX_STOMP_LOW:
+		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+					   AR_STOMP_LOW_WLAN_WGHT);
+		break;
+	case ATH_BTCOEX_STOMP_NONE:
+		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+					   AR_STOMP_NONE_WLAN_WGHT);
+		break;
+	default:
+		ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+			  "Invalid Stomptype\n");
+		break;
+	}
+
+	ath9k_hw_btcoex_enable(ah);
+}
+
+static void ath9k_gen_timer_start(struct ath_hw *ah,
+				  struct ath_gen_timer *timer,
+				  u32 timer_next,
+				  u32 timer_period)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+	ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
+
+	if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+		ath9k_hw_set_interrupts(ah, 0);
+		sc->imask |= ATH9K_INT_GENTIMER;
+		ath9k_hw_set_interrupts(ah, sc->imask);
+	}
+}
+
+static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+	ath9k_hw_gen_timer_stop(ah, timer);
+
+	/* if no timer is enabled, turn off interrupt mask */
+	if (timer_table->timer_mask.val == 0) {
+		ath9k_hw_set_interrupts(ah, 0);
+		sc->imask &= ~ATH9K_INT_GENTIMER;
+		ath9k_hw_set_interrupts(ah, sc->imask);
+	}
+}
+
+/*
+ * This is the master bt coex timer which runs every 45 ms;
+ * bt traffic is given priority during 55% of this period
+ * while wlan gets the remaining 45%.
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+	struct ath_softc *sc = (struct ath_softc *) data;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_btcoex *btcoex = &sc->btcoex;
+
+	ath_detect_bt_priority(sc);
+
+	spin_lock_bh(&btcoex->btcoex_lock);
+
+	ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
+
+	spin_unlock_bh(&btcoex->btcoex_lock);
+
+	if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+		if (btcoex->hw_timer_enabled)
+			ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+		ath9k_gen_timer_start(ah,
+				      btcoex->no_stomp_timer,
+				      (ath9k_hw_gettsf32(ah) +
+				       btcoex->btcoex_no_stomp),
+				       btcoex->btcoex_no_stomp * 10);
+		btcoex->hw_timer_enabled = true;
+	}
+
+	mod_timer(&btcoex->period_timer, jiffies +
+				  msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+}
+
+/*
+ * Generic tsf based hw timer which configures weight
+ * registers to time slice between wlan and bt traffic
+ */
+static void ath_btcoex_no_stomp_timer(void *arg)
+{
+	struct ath_softc *sc = (struct ath_softc *)arg;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_btcoex *btcoex = &sc->btcoex;
+
+	ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+		  "no stomp timer running \n");
+
+	spin_lock_bh(&btcoex->btcoex_lock);
+
+	if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
+		ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
+	 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+		ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
+
+	spin_unlock_bh(&btcoex->btcoex_lock);
+}
+
+static int ath_init_btcoex_timer(struct ath_softc *sc)
+{
+	struct ath_btcoex *btcoex = &sc->btcoex;
+
+	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+		btcoex->btcoex_period / 100;
+
+	setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+			(unsigned long) sc);
+
+	spin_lock_init(&btcoex->btcoex_lock);
+
+	btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
+			ath_btcoex_no_stomp_timer,
+			ath_btcoex_no_stomp_timer,
+			(void *) sc, AR_FIRST_NDP_TIMER);
+
+	if (!btcoex->no_stomp_timer)
+		return -ENOMEM;
+
+	return 0;
+}
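
ath_init_btcoex_timer() derives btcoex_no_stomp as (100 - duty_cycle)% of btcoex_period, i.e. the slice of each period handled by the no-stomp gen-timer. A quick standalone arithmetic sketch; the 45 ms period and 55% duty cycle are assumed values matching the comment above, not copied from the headers.

#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration only. */
	unsigned int period_us = 45 * 1000;	/* ATH_BTCOEX_DEF_BT_PERIOD * 1000 */
	unsigned int duty_cycle = 55;		/* BT share of the period, in %   */
	unsigned int no_stomp = (100 - duty_cycle) * period_us / 100;

	/* 45% of 45 ms = 20250 us for the no-stomp window. */
	printf("btcoex_no_stomp = %u us\n", no_stomp);
	return 0;
}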
+
+/*
+ * Reads and writes share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can only sanely accept 2 requests. After
+ * that the device goes bananas. Serializing the reads/writes prevents this
+ * from happening.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+	struct ath_hw *ah = (struct ath_hw *) hw_priv;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+		unsigned long flags;
+		spin_lock_irqsave(&sc->sc_serial_rw, flags);
+		iowrite32(val, sc->mem + reg_offset);
+		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+	} else
+		iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+	struct ath_hw *ah = (struct ath_hw *) hw_priv;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+	u32 val;
+
+	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+		unsigned long flags;
+		spin_lock_irqsave(&sc->sc_serial_rw, flags);
+		val = ioread32(sc->mem + reg_offset);
+		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+	} else
+		val = ioread32(sc->mem + reg_offset);
+	return val;
+}
+
+static const struct ath_ops ath9k_common_ops = {
+	.read = ath9k_ioread32,
+	.write = ath9k_iowrite32,
+};
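
With ath9k_common_ops installed below in ath_init_softc(), shared code reaches the registers through the ath_common ops table instead of dereferencing sc->mem directly; the EEPROM read path in pci.c later in this patch uses exactly that common->ops->read() route. A minimal usage sketch (not part of this patch), assuming the ath9k/ath.h declarations:

static inline u32 example_reg_read(struct ath_common *common, u32 reg)
{
	/* dispatches to ath9k_ioread32(), which honours serialize_regmode */
	return common->ops->read(common->ah, reg);
}

static inline void example_reg_write(struct ath_common *common, u32 reg, u32 val)
{
	/* dispatches to ath9k_iowrite32() */
	common->ops->write(common->ah, val, reg);
}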
+
+/*
  * Initialize and fill ath_softc; ath_softc is the
  * "Software Carrier" struct. Historically it has existed
  * to allow the separation between hardware specific
  * variables (now in ath_hw) and driver specific variables.
  */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
+static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+			  const struct ath_bus_ops *bus_ops)
 {
 	struct ath_hw *ah = NULL;
+	struct ath_common *common;
 	int r = 0, i;
 	int csz = 0;
+	int qnum;
 
 	/* XXX: hardware will not be ready until ath_open() being called */
 	sc->sc_flags |= SC_OP_INVALID;
 
-	if (ath9k_init_debug(sc) < 0)
-		printk(KERN_ERR "Unable to create debugfs files\n");
-
 	spin_lock_init(&sc->wiphy_lock);
 	spin_lock_init(&sc->sc_resetlock);
 	spin_lock_init(&sc->sc_serial_rw);
@@ -1328,75 +1617,80 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
 	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
 		     (unsigned long)sc);
 
-	/*
-	 * Cache line size is used to size and align various
-	 * structures used to communicate with the hardware.
-	 */
-	ath_read_cachesize(sc, &csz);
-	/* XXX assert csz is non-zero */
-	sc->common.cachelsz = csz << 2;	/* convert to bytes */
-
 	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
-	if (!ah) {
-		r = -ENOMEM;
-		goto bad_no_ah;
-	}
+	if (!ah)
+		return -ENOMEM;
 
-	ah->ah_sc = sc;
 	ah->hw_version.devid = devid;
 	ah->hw_version.subsysid = subsysid;
 	sc->sc_ah = ah;
 
+	common = ath9k_hw_common(ah);
+	common->ops = &ath9k_common_ops;
+	common->bus_ops = bus_ops;
+	common->ah = ah;
+	common->hw = sc->hw;
+	common->priv = sc;
+	common->debug_mask = ath9k_debug;
+
+	/*
+	 * Cache line size is used to size and align various
+	 * structures used to communicate with the hardware.
+	 */
+	ath_read_cachesize(common, &csz);
+	/* XXX assert csz is non-zero */
+	common->cachelsz = csz << 2;	/* convert to bytes */
+
 	r = ath9k_hw_init(ah);
 	if (r) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to initialize hardware; "
-			"initialization status: %d\n", r);
-		goto bad;
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to initialize hardware; "
+			  "initialization status: %d\n", r);
+		goto bad_free_hw;
+	}
+
+	if (ath9k_init_debug(ah) < 0) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to create debugfs files\n");
+		goto bad_free_hw;
 	}
 
 	/* Get the hardware key cache size. */
-	sc->keymax = ah->caps.keycache_size;
-	if (sc->keymax > ATH_KEYMAX) {
-		DPRINTF(sc, ATH_DBG_ANY,
-			"Warning, using only %u entries in %u key cache\n",
-			ATH_KEYMAX, sc->keymax);
-		sc->keymax = ATH_KEYMAX;
+	common->keymax = ah->caps.keycache_size;
+	if (common->keymax > ATH_KEYMAX) {
+		ath_print(common, ATH_DBG_ANY,
+			  "Warning, using only %u entries in %u key cache\n",
+			  ATH_KEYMAX, common->keymax);
+		common->keymax = ATH_KEYMAX;
 	}
 
 	/*
 	 * Reset the key cache since some parts do not
 	 * reset the contents on initial power up.
 	 */
-	for (i = 0; i < sc->keymax; i++)
+	for (i = 0; i < common->keymax; i++)
 		ath9k_hw_keyreset(ah, (u16) i);
 
 	/* default to MONITOR mode */
 	sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
 
-	/* Setup rate tables */
-
-	ath_rate_attach(sc);
-	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
-	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
-
 	/*
 	 * Allocate hardware transmit queues: one queue for
 	 * beacon frames and one data queue for each QoS
 	 * priority.  Note that the hal handles resetting
 	 * these queues at the needed time.
 	 */
-	sc->beacon.beaconq = ath_beaconq_setup(ah);
+	sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
 	if (sc->beacon.beaconq == -1) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to setup a beacon xmit queue\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup a beacon xmit queue\n");
 		r = -EIO;
 		goto bad2;
 	}
 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
 	if (sc->beacon.cabq == NULL) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to setup CAB xmit queue\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup CAB xmit queue\n");
 		r = -EIO;
 		goto bad2;
 	}
@@ -1410,27 +1704,27 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
 	/* Setup data queues */
 	/* NB: ensure BK queue is the lowest priority h/w queue */
 	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for BK traffic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup xmit queue for BK traffic\n");
 		r = -EIO;
 		goto bad2;
 	}
 
 	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for BE traffic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup xmit queue for BE traffic\n");
 		r = -EIO;
 		goto bad2;
 	}
 	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for VI traffic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup xmit queue for VI traffic\n");
 		r = -EIO;
 		goto bad2;
 	}
 	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for VO traffic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup xmit queue for VO traffic\n");
 		r = -EIO;
 		goto bad2;
 	}
@@ -1438,8 +1732,8 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
 	/* Initializes the noise floor to a reasonable default value.
 	 * Later on this will be updated during ANI processing. */
 
-	sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
-	setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
+	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
 
 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
 				   ATH9K_CIPHER_TKIP, NULL)) {
@@ -1465,7 +1759,7 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
 				      ATH9K_CIPHER_MIC, NULL)
 	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
 				      0, NULL))
-		sc->splitmic = 1;
+		common->splitmic = 1;
 
 	/* turn on mcast key search if possible */
 	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
@@ -1480,14 +1774,14 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
 		sc->sc_flags |= SC_OP_RXAGGR;
 	}
 
-	sc->tx_chainmask = ah->caps.tx_chainmask;
-	sc->rx_chainmask = ah->caps.rx_chainmask;
+	common->tx_chainmask = ah->caps.tx_chainmask;
+	common->rx_chainmask = ah->caps.rx_chainmask;
 
 	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
 	sc->rx.defant = ath9k_hw_getdefantenna(ah);
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
-		memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
+		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
 
 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */
 
@@ -1499,26 +1793,45 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
 
 	/* setup channels and rates */
 
-	sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
-	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
-		sc->rates[IEEE80211_BAND_2GHZ];
-	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
-	sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
-		ARRAY_SIZE(ath9k_2ghz_chantable);
+	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
+		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
+			ARRAY_SIZE(ath9k_2ghz_chantable);
+		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+			ARRAY_SIZE(ath9k_legacy_rates);
+	}
 
 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
 		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
-		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
-			sc->rates[IEEE80211_BAND_5GHZ];
 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
 			ARRAY_SIZE(ath9k_5ghz_chantable);
+		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+			ath9k_legacy_rates + 4;
+		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+			ARRAY_SIZE(ath9k_legacy_rates) - 4;
 	}
 
-	if (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) {
-		r = ath9k_hw_btcoex_init(ah);
+	switch (ah->btcoex_hw.scheme) {
+	case ATH_BTCOEX_CFG_NONE:
+		break;
+	case ATH_BTCOEX_CFG_2WIRE:
+		ath9k_hw_btcoex_init_2wire(ah);
+		break;
+	case ATH_BTCOEX_CFG_3WIRE:
+		ath9k_hw_btcoex_init_3wire(ah);
+		r = ath_init_btcoex_timer(sc);
 		if (r)
 			goto bad2;
+		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+		ath9k_hw_init_btcoex_hw(ah, qnum);
+		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+		break;
+	default:
+		WARN_ON(1);
+		break;
 	}
 
 	return 0;
@@ -1527,12 +1840,9 @@ bad2:
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i))
 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-bad:
-	ath9k_hw_detach(ah);
-	sc->sc_ah = NULL;
-bad_no_ah:
-	ath9k_exit_debug(sc);
 
+bad_free_hw:
+	ath9k_uninit_hw(sc);
 	return r;
 }
 
@@ -1555,7 +1865,7 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 		BIT(NL80211_IFTYPE_ADHOC) |
 		BIT(NL80211_IFTYPE_MESH_POINT);
 
-	hw->wiphy->ps_default = false;
+	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	hw->queues = 4;
 	hw->max_rates = 4;
@@ -1568,43 +1878,53 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 
 	hw->rate_control_algorithm = "ath9k_rate_control";
 
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-		&sc->sbands[IEEE80211_BAND_2GHZ];
+	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+			&sc->sbands[IEEE80211_BAND_2GHZ];
 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&sc->sbands[IEEE80211_BAND_5GHZ];
 }
 
 /* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid)
+int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+		    const struct ath_bus_ops *bus_ops)
 {
 	struct ieee80211_hw *hw = sc->hw;
+	struct ath_common *common;
+	struct ath_hw *ah;
 	int error = 0, i;
 	struct ath_regulatory *reg;
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
+	dev_dbg(sc->dev, "Attach ATH hw\n");
 
-	error = ath_init_softc(devid, sc, subsysid);
+	error = ath_init_softc(devid, sc, subsysid, bus_ops);
 	if (error != 0)
 		return error;
 
+	ah = sc->sc_ah;
+	common = ath9k_hw_common(ah);
+
 	/* get mac address from hardware and set in mac80211 */
 
-	SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
+	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
 
 	ath_set_hw_capab(sc, hw);
 
-	error = ath_regd_init(&sc->common.regulatory, sc->hw->wiphy,
+	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
 			      ath9k_reg_notifier);
 	if (error)
 		return error;
 
-	reg = &sc->common.regulatory;
+	reg = &common->regulatory;
 
-	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
-		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
-		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
-			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+		if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
+			setup_ht_cap(sc,
+				     &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+		if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
+			setup_ht_cap(sc,
+				     &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
 	}
 
 	/* initialize tx/rx engine */
@@ -1641,9 +1961,7 @@ error_attach:
 		if (ATH_TXQ_SETUP(sc, i))
 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 
-	ath9k_hw_detach(sc->sc_ah);
-	sc->sc_ah = NULL;
-	ath9k_exit_debug(sc);
+	ath9k_uninit_hw(sc);
 
 	return error;
 }
@@ -1651,6 +1969,7 @@ error_attach:
 int ath_reset(struct ath_softc *sc, bool retry_tx)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_hw *hw = sc->hw;
 	int r;
 
@@ -1662,12 +1981,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
 	if (r)
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to reset hardware; reset status %d\n", r);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to reset hardware; reset status %d\n", r);
 	spin_unlock_bh(&sc->sc_resetlock);
 
 	if (ath_startrecv(sc) != 0)
-		DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to start recv logic\n");
 
 	/*
 	 * We may be doing a reset in response to a request
@@ -1710,19 +2030,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
-
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_desc *ds;
 	struct ath_buf *bf;
 	int i, bsize, error;
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
-		name, nbuf, ndesc);
+	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+		  name, nbuf, ndesc);
 
 	INIT_LIST_HEAD(head);
 	/* ath_desc must be a multiple of DWORDs */
 	if ((sizeof(struct ath_desc) % 4) != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
-		ASSERT((sizeof(struct ath_desc) % 4) == 0);
+		ath_print(common, ATH_DBG_FATAL,
+			  "ath_desc not DWORD aligned\n");
+		BUG_ON((sizeof(struct ath_desc) % 4) != 0);
 		error = -ENOMEM;
 		goto fail;
 	}
@@ -1755,9 +2076,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 		goto fail;
 	}
 	ds = dd->dd_desc;
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
-		name, ds, (u32) dd->dd_desc_len,
-		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+		  name, ds, (u32) dd->dd_desc_len,
+		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
 
 	/* allocate buffers */
 	bsize = sizeof(struct ath_buf) * nbuf;
@@ -1780,7 +2101,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 			 * descriptor fetch.
 			 */
 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
-				ASSERT((caddr_t) bf->bf_desc <
+				BUG_ON((caddr_t) bf->bf_desc >=
 				       ((caddr_t) dd->dd_desc +
 					dd->dd_desc_len));
 
@@ -1884,31 +2205,50 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
 	}
 
-	sc->tx_chan_width = ATH9K_HT_MACMODE_20;
-
-	if (conf_is_ht(conf)) {
-		if (conf_is_ht40(conf))
-			sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
-
+	if (conf_is_ht(conf))
 		ichan->chanmode = ath_get_extchanmode(sc, chan,
 					    conf->channel_type);
-	}
 }
 
 /**********************/
 /* mac80211 callbacks */
 /**********************/
 
+/*
+ * (Re)start btcoex timers
+ */
+static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_hw *ah = sc->sc_ah;
+
+	ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+		  "Starting btcoex timers");
+
+	/* make sure duty cycle timer is also stopped when resuming */
+	if (btcoex->hw_timer_enabled)
+		ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+
+	btcoex->bt_priority_cnt = 0;
+	btcoex->bt_priority_time = jiffies;
+	sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
+
+	mod_timer(&btcoex->period_timer, jiffies);
+}
+
 static int ath9k_start(struct ieee80211_hw *hw)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_channel *curchan = hw->conf.channel;
 	struct ath9k_channel *init_channel;
 	int r;
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
-		"initial channel: %d MHz\n", curchan->center_freq);
+	ath_print(common, ATH_DBG_CONFIG,
+		  "Starting driver with initial channel: %d MHz\n",
+		  curchan->center_freq);
 
 	mutex_lock(&sc->mutex);
 
@@ -1940,7 +2280,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	init_channel = ath_get_curchannel(sc, hw);
 
 	/* Reset SERDES registers */
-	ath9k_hw_configpcipowersave(sc->sc_ah, 0, 0);
+	ath9k_hw_configpcipowersave(ah, 0, 0);
 
 	/*
 	 * The basic interface to setting the hardware in a good
@@ -1950,12 +2290,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	 * and then setup of the interrupt mask.
 	 */
 	spin_lock_bh(&sc->sc_resetlock);
-	r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
+	r = ath9k_hw_reset(ah, init_channel, false);
 	if (r) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to reset hardware; reset status %d "
-			"(freq %u MHz)\n", r,
-			curchan->center_freq);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to reset hardware; reset status %d "
+			  "(freq %u MHz)\n", r,
+			  curchan->center_freq);
 		spin_unlock_bh(&sc->sc_resetlock);
 		goto mutex_unlock;
 	}
@@ -1975,7 +2315,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	 * here except setup the interrupt mask.
 	 */
 	if (ath_startrecv(sc) != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to start recv logic\n");
 		r = -EIO;
 		goto mutex_unlock;
 	}
@@ -1985,10 +2326,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
 		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
 		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
 
-	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
 		sc->imask |= ATH9K_INT_GTT;
 
-	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
 		sc->imask |= ATH9K_INT_CST;
 
 	ath_cache_conf_rate(sc, &hw->conf);
@@ -1997,21 +2338,22 @@ static int ath9k_start(struct ieee80211_hw *hw)
 
 	/* Disable BMISS interrupt when we're not associated */
 	sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
-	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+	ath9k_hw_set_interrupts(ah, sc->imask);
 
 	ieee80211_wake_queues(hw);
 
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 
-	if ((sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) &&
-	    !(sc->sc_flags & SC_OP_BTCOEX_ENABLED)) {
-		ath_btcoex_set_weight(&sc->btcoex_info, AR_BT_COEX_WGHT,
-				      AR_STOMP_LOW_WLAN_WGHT);
-		ath9k_hw_btcoex_enable(sc->sc_ah);
+	if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
+	    !ah->btcoex_hw.enabled) {
+		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+					   AR_STOMP_LOW_WLAN_WGHT);
+		ath9k_hw_btcoex_enable(ah);
 
-		ath_pcie_aspm_disable(sc);
-		if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
-			ath_btcoex_timer_resume(sc, &sc->btcoex_info);
+		if (common->bus_ops->bt_coex_prep)
+			common->bus_ops->bt_coex_prep(common);
+		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+			ath9k_btcoex_timer_resume(sc);
 	}
 
 mutex_unlock:
@@ -2026,17 +2368,19 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_tx_control txctl;
-	int hdrlen, padsize;
+	int padpos, padsize;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
 	if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
-		printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state "
-		       "%d\n", wiphy_name(hw->wiphy), aphy->state);
+		ath_print(common, ATH_DBG_XMIT,
+			  "ath9k: %s: TX in unexpected wiphy state "
+			  "%d\n", wiphy_name(hw->wiphy), aphy->state);
 		goto exit;
 	}
 
 	if (sc->ps_enabled) {
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		/*
 		 * mac80211 does not set PM field for normal data frames, so we
 		 * need to update that based on the current PS mode.
@@ -2044,8 +2388,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 		if (ieee80211_is_data(hdr->frame_control) &&
 		    !ieee80211_is_nullfunc(hdr->frame_control) &&
 		    !ieee80211_has_pm(hdr->frame_control)) {
-			DPRINTF(sc, ATH_DBG_PS, "Add PM=1 for a TX frame "
-				"while in PS mode\n");
+			ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame "
+				  "while in PS mode\n");
 			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
 		}
 	}
@@ -2056,15 +2400,15 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 		 * power save mode. Need to wake up hardware for the TX to be
 		 * completed and if needed, also for RX of buffered frames.
 		 */
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		ath9k_ps_wakeup(sc);
 		ath9k_hw_setrxabort(sc->sc_ah, 0);
 		if (ieee80211_is_pspoll(hdr->frame_control)) {
-			DPRINTF(sc, ATH_DBG_PS, "Sending PS-Poll to pick a "
-				"buffered frame\n");
+			ath_print(common, ATH_DBG_PS,
+				  "Sending PS-Poll to pick a buffered frame\n");
 			sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
 		} else {
-			DPRINTF(sc, ATH_DBG_PS, "Wake up to complete TX\n");
+			ath_print(common, ATH_DBG_PS,
+				  "Wake up to complete TX\n");
 			sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
 		}
 		/*
@@ -2083,7 +2427,6 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 	 * BSSes.
 	 */
 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
 			sc->tx.seq_no += 0x10;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -2091,13 +2434,13 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 	}
 
 	/* Add the padding after the header if this is not already done */
-	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-	if (hdrlen & 3) {
-		padsize = hdrlen % 4;
+	padpos = ath9k_cmn_padpos(hdr->frame_control);
+	padsize = padpos & 3;
+	if (padsize && skb->len > padpos) {
 		if (skb_headroom(skb) < padsize)
 			return -1;
 		skb_push(skb, padsize);
-		memmove(skb->data, skb->data + padsize, hdrlen);
+		memmove(skb->data, skb->data + padsize, padpos);
 	}
 
 	/* Check if a tx queue is available */
@@ -2106,10 +2449,10 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 	if (!txctl.txq)
 		goto exit;
 
-	DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
+	ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
-		DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
+		ath_print(common, ATH_DBG_XMIT, "TX failed\n");
 		goto exit;
 	}
 
@@ -2119,10 +2462,28 @@ exit:
 	return 0;
 }
 
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_hw *ah = sc->sc_ah;
+
+	del_timer_sync(&btcoex->period_timer);
+
+	if (btcoex->hw_timer_enabled)
+		ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+	btcoex->hw_timer_enabled = false;
+}
+
 static void ath9k_stop(struct ieee80211_hw *hw)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	mutex_lock(&sc->mutex);
 
@@ -2137,7 +2498,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 	}
 
 	if (sc->sc_flags & SC_OP_INVALID) {
-		DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
+		ath_print(common, ATH_DBG_ANY, "Device not present\n");
 		mutex_unlock(&sc->mutex);
 		return;
 	}
@@ -2147,33 +2508,33 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 		return; /* another wiphy still in use */
 	}
 
-	if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
-		ath9k_hw_btcoex_disable(sc->sc_ah);
-		if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
-			ath_btcoex_timer_pause(sc, &sc->btcoex_info);
+	if (ah->btcoex_hw.enabled) {
+		ath9k_hw_btcoex_disable(ah);
+		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+			ath9k_btcoex_timer_pause(sc);
 	}
 
 	/* make sure h/w will not generate any interrupt
 	 * before setting the invalid flag. */
-	ath9k_hw_set_interrupts(sc->sc_ah, 0);
+	ath9k_hw_set_interrupts(ah, 0);
 
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		ath_drain_all_txq(sc, false);
 		ath_stoprecv(sc);
-		ath9k_hw_phy_disable(sc->sc_ah);
+		ath9k_hw_phy_disable(ah);
 	} else
 		sc->rx.rxlink = NULL;
 
 	/* disable HAL and put h/w to sleep */
-	ath9k_hw_disable(sc->sc_ah);
-	ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
-	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+	ath9k_hw_disable(ah);
+	ath9k_hw_configpcipowersave(ah, 1, 1);
+	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 
 	sc->sc_flags |= SC_OP_INVALID;
 
 	mutex_unlock(&sc->mutex);
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
+	ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
 }
 
 static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -2181,6 +2542,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_vif *avp = (void *)conf->vif->drv_priv;
 	enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
 	int ret = 0;
@@ -2207,13 +2569,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 		ic_opmode = conf->type;
 		break;
 	default:
-		DPRINTF(sc, ATH_DBG_FATAL,
+		ath_print(common, ATH_DBG_FATAL,
 			"Interface type %d not yet supported\n", conf->type);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);
+	ath_print(common, ATH_DBG_CONFIG,
+		  "Attach a VIF of type: %d\n", ic_opmode);
 
 	/* Set the VIF opmode */
 	avp->av_opmode = ic_opmode;
@@ -2251,7 +2614,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 	if (conf->type == NL80211_IFTYPE_AP    ||
 	    conf->type == NL80211_IFTYPE_ADHOC ||
 	    conf->type == NL80211_IFTYPE_MONITOR)
-		ath_start_ani(sc);
+		ath_start_ani(common);
 
 out:
 	mutex_unlock(&sc->mutex);
@@ -2263,15 +2626,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_vif *avp = (void *)conf->vif->drv_priv;
 	int i;
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
+	ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
 
 	mutex_lock(&sc->mutex);
 
 	/* Stop ANI */
-	del_timer_sync(&sc->ani.timer);
+	del_timer_sync(&common->ani.timer);
 
 	/* Reclaim beacon resources */
 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
@@ -2301,32 +2665,55 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_conf *conf = &hw->conf;
 	struct ath_hw *ah = sc->sc_ah;
-	bool all_wiphys_idle = false, disable_radio = false;
+	bool disable_radio;
 
 	mutex_lock(&sc->mutex);
 
-	/* Leave this as the first check */
+	/*
+	 * Leave this as the first check because we need to turn on the
+	 * radio if it was disabled prior to processing the rest
+	 * of the changes. Likewise we must only disable the radio towards
+	 * the end.
+	 */
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+		bool enable_radio = false;
+		bool all_wiphys_idle;
+		bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 
 		spin_lock_bh(&sc->wiphy_lock);
 		all_wiphys_idle =  ath9k_all_wiphys_idle(sc);
+		ath9k_set_wiphy_idle(aphy, idle);
+
+		if (!idle && all_wiphys_idle)
+			enable_radio = true;
+
+		/*
+		 * After we unlock here it is possible another wiphy
+		 * can be re-enabled, so to account for that we will
+		 * only disable the radio toward the end of this routine
+		 * if by then all wiphys are still idle.
+		 */
 		spin_unlock_bh(&sc->wiphy_lock);
 
-		if (conf->flags & IEEE80211_CONF_IDLE){
-			if (all_wiphys_idle)
-				disable_radio = true;
-		}
-		else if (all_wiphys_idle) {
-			ath_radio_enable(sc);
-			DPRINTF(sc, ATH_DBG_CONFIG,
-				"not-idle: enabling radio\n");
+		if (enable_radio) {
+			ath_radio_enable(sc, hw);
+			ath_print(common, ATH_DBG_CONFIG,
+				  "not-idle: enabling radio\n");
 		}
 	}
 
+	/*
+	 * We just prepare to enable PS. We have to wait until our AP has
+	 * ACK'd our null data frame before disabling RX; otherwise we'll
+	 * ignore those ACKs and end up retransmitting the same null data
+	 * frames.
+	 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
+	 */
 	if (changed & IEEE80211_CONF_CHANGE_PS) {
 		if (conf->flags & IEEE80211_CONF_PS) {
+			sc->sc_flags |= SC_OP_PS_ENABLED;
 			if (!(ah->caps.hw_caps &
 			      ATH9K_HW_CAP_AUTOSLEEP)) {
 				if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -2334,12 +2721,21 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 					ath9k_hw_set_interrupts(sc->sc_ah,
 							sc->imask);
 				}
-				ath9k_hw_setrxabort(sc->sc_ah, 1);
 			}
-			sc->ps_enabled = true;
+			/*
+			 * At this point we know hardware has received an ACK
+			 * of a previously sent null data frame.
+			 */
+			if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
+				sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+				sc->ps_enabled = true;
+				ath9k_hw_setrxabort(sc->sc_ah, 1);
+			}
 		} else {
 			sc->ps_enabled = false;
-			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
+			sc->sc_flags &= ~(SC_OP_PS_ENABLED |
+					  SC_OP_NULLFUNC_COMPLETED);
+			ath9k_setpower(sc, ATH9K_PM_AWAKE);
 			if (!(ah->caps.hw_caps &
 			      ATH9K_HW_CAP_AUTOSLEEP)) {
 				ath9k_hw_setrxabort(sc->sc_ah, 0);
@@ -2374,8 +2770,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 			goto skip_chan_change;
 		}
 
-		DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
-			curchan->center_freq);
+		ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+			  curchan->center_freq);
 
 		/* XXX: remove me eventually */
 		ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
@@ -2383,7 +2779,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 		ath_update_chainmask(sc, conf_is_ht(conf));
 
 		if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
-			DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
+			ath_print(common, ATH_DBG_FATAL,
+				  "Unable to set channel\n");
 			mutex_unlock(&sc->mutex);
 			return -EINVAL;
 		}
@@ -2393,9 +2790,13 @@ skip_chan_change:
 	if (changed & IEEE80211_CONF_CHANGE_POWER)
 		sc->config.txpowlimit = 2 * conf->power_level;
 
+	spin_lock_bh(&sc->wiphy_lock);
+	disable_radio = ath9k_all_wiphys_idle(sc);
+	spin_unlock_bh(&sc->wiphy_lock);
+
 	if (disable_radio) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "idle: disabling radio\n");
-		ath_radio_disable(sc);
+		ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
+		ath_radio_disable(sc, hw);
 	}
 
 	mutex_unlock(&sc->mutex);
@@ -2431,7 +2832,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
 	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
 	ath9k_ps_restore(sc);
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", rfilt);
+	ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+		  "Set HW RX filter: 0x%x\n", rfilt);
 }
 
 static void ath9k_sta_notify(struct ieee80211_hw *hw,
@@ -2459,6 +2861,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath9k_tx_queue_info qi;
 	int ret = 0, qnum;
 
@@ -2475,15 +2878,19 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
 	qi.tqi_burstTime = params->txop;
 	qnum = ath_get_hal_qnum(queue, sc);
 
-	DPRINTF(sc, ATH_DBG_CONFIG,
-		"Configure tx [queue/halq] [%d/%d],  "
-		"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
-		queue, qnum, params->aifs, params->cw_min,
-		params->cw_max, params->txop);
+	ath_print(common, ATH_DBG_CONFIG,
+		  "Configure tx [queue/halq] [%d/%d],  "
+		  "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+		  queue, qnum, params->aifs, params->cw_min,
+		  params->cw_max, params->txop);
 
 	ret = ath_txq_update(sc, qnum, &qi);
 	if (ret)
-		DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
+		ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+
+	if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
+		if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret)
+			ath_beaconq_config(sc);
 
 	mutex_unlock(&sc->mutex);
 
@@ -2498,6 +2905,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	int ret = 0;
 
 	if (modparam_nohwcrypt)
@@ -2505,11 +2913,11 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
-	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n");
+	ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
 
 	switch (cmd) {
 	case SET_KEY:
-		ret = ath_key_config(sc, vif, sta, key);
+		ret = ath_key_config(common, vif, sta, key);
 		if (ret >= 0) {
 			key->hw_key_idx = ret;
 			/* push IV and Michael MIC generation to stack */
@@ -2522,7 +2930,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 		}
 		break;
 	case DISABLE_KEY:
-		ath_key_delete(sc, key);
+		ath_key_delete(common, key);
 		break;
 	default:
 		ret = -EINVAL;
@@ -2542,94 +2950,67 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_vif *avp = (void *)vif->drv_priv;
-	u32 rfilt = 0;
-	int error, i;
+	int error;
 
 	mutex_lock(&sc->mutex);
 
-	/*
-	 * TODO: Need to decide which hw opmode to use for
-	 *       multi-interface cases
-	 * XXX: This belongs into add_interface!
-	 */
-	if (vif->type == NL80211_IFTYPE_AP &&
-	    ah->opmode != NL80211_IFTYPE_AP) {
-		ah->opmode = NL80211_IFTYPE_STATION;
-		ath9k_hw_setopmode(ah);
-		memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
-		sc->curaid = 0;
-		ath9k_hw_write_associd(sc);
-		/* Request full reset to get hw opmode changed properly */
-		sc->sc_flags |= SC_OP_FULL_RESET;
-	}
-
-	if ((changed & BSS_CHANGED_BSSID) &&
-	    !is_zero_ether_addr(bss_conf->bssid)) {
-		switch (vif->type) {
-		case NL80211_IFTYPE_STATION:
-		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_MESH_POINT:
-			/* Set BSSID */
-			memcpy(sc->curbssid, bss_conf->bssid, ETH_ALEN);
-			memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
-			sc->curaid = 0;
-			ath9k_hw_write_associd(sc);
-
-			/* Set aggregation protection mode parameters */
-			sc->config.ath_aggr_prot = 0;
-
-			DPRINTF(sc, ATH_DBG_CONFIG,
-				"RX filter 0x%x bssid %pM aid 0x%x\n",
-				rfilt, sc->curbssid, sc->curaid);
-
-			/* need to reconfigure the beacon */
-			sc->sc_flags &= ~SC_OP_BEACONS ;
+	if (changed & BSS_CHANGED_BSSID) {
+		/* Set BSSID */
+		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+		memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
+		common->curaid = 0;
+		ath9k_hw_write_associd(ah);
 
-			break;
-		default:
-			break;
-		}
+		/* Set aggregation protection mode parameters */
+		sc->config.ath_aggr_prot = 0;
+
+		/* Only legacy IBSS for now */
+		if (vif->type == NL80211_IFTYPE_ADHOC)
+			ath_update_chainmask(sc, 0);
+
+		ath_print(common, ATH_DBG_CONFIG,
+			  "BSSID: %pM aid: 0x%x\n",
+			  common->curbssid, common->curaid);
+
+		/* need to reconfigure the beacon */
+		sc->sc_flags &= ~SC_OP_BEACONS;
 	}
 
-	if ((vif->type == NL80211_IFTYPE_ADHOC) ||
-	    (vif->type == NL80211_IFTYPE_AP) ||
-	    (vif->type == NL80211_IFTYPE_MESH_POINT)) {
-		if ((changed & BSS_CHANGED_BEACON) ||
-		    (changed & BSS_CHANGED_BEACON_ENABLED &&
-		     bss_conf->enable_beacon)) {
-			/*
-			 * Allocate and setup the beacon frame.
-			 *
-			 * Stop any previous beacon DMA.  This may be
-			 * necessary, for example, when an ibss merge
-			 * causes reconfiguration; we may be called
-			 * with beacon transmission active.
-			 */
-			ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+	/* Enable transmission of beacons (AP, IBSS, MESH) */
+	if ((changed & BSS_CHANGED_BEACON) ||
+	    ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
+		ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+		error = ath_beacon_alloc(aphy, vif);
+		if (!error)
+			ath_beacon_config(sc, vif);
+	}
 
+	/* Disable transmission of beacons */
+	if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
+		ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+
+	if (changed & BSS_CHANGED_BEACON_INT) {
+		sc->beacon_interval = bss_conf->beacon_int;
+		/*
+		 * In case of AP mode, the HW TSF has to be reset
+		 * when the beacon interval changes.
+		 */
+		if (vif->type == NL80211_IFTYPE_AP) {
+			sc->sc_flags |= SC_OP_TSF_RESET;
+			ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 			error = ath_beacon_alloc(aphy, vif);
 			if (!error)
 				ath_beacon_config(sc, vif);
+		} else {
+			ath_beacon_config(sc, vif);
 		}
 	}
 
-	/* Check for WLAN_CAPABILITY_PRIVACY ? */
-	if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
-		for (i = 0; i < IEEE80211_WEP_NKID; i++)
-			if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
-				ath9k_hw_keysetmac(sc->sc_ah,
-						   (u16)i,
-						   sc->curbssid);
-	}
-
-	/* Only legacy IBSS for now */
-	if (vif->type == NL80211_IFTYPE_ADHOC)
-		ath_update_chainmask(sc, 0);
-
 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
-			bss_conf->use_short_preamble);
+		ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+			  bss_conf->use_short_preamble);
 		if (bss_conf->use_short_preamble)
 			sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
 		else
@@ -2637,8 +3018,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
-			bss_conf->use_cts_prot);
+		ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+			  bss_conf->use_cts_prot);
 		if (bss_conf->use_cts_prot &&
 		    hw->conf.channel->band != IEEE80211_BAND_5GHZ)
 			sc->sc_flags |= SC_OP_PROTECT_ENABLE;
@@ -2647,23 +3028,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_ASSOC) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+		ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
 			bss_conf->assoc);
 		ath9k_bss_assoc_info(sc, vif, bss_conf);
 	}
 
-	/*
-	 * The HW TSF has to be reset when the beacon interval changes.
-	 * We set the flag here, and ath_beacon_config_ap() would take this
-	 * into account when it gets called through the subsequent
-	 * config_interface() call - with IFCC_BEACON in the changed field.
-	 */
-
-	if (changed & BSS_CHANGED_BEACON_INT) {
-		sc->sc_flags |= SC_OP_TSF_RESET;
-		sc->beacon_interval = bss_conf->beacon_int;
-	}
-
 	mutex_unlock(&sc->mutex);
 }
 
@@ -2696,11 +3065,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw)
 	struct ath_softc *sc = aphy->sc;
 
 	mutex_lock(&sc->mutex);
+
+	ath9k_ps_wakeup(sc);
 	ath9k_hw_reset_tsf(sc->sc_ah);
+	ath9k_ps_restore(sc);
+
 	mutex_unlock(&sc->mutex);
 }
 
 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
 			      enum ieee80211_ampdu_mlme_action action,
 			      struct ieee80211_sta *sta,
 			      u16 tid, u16 *ssn)
@@ -2718,17 +3092,18 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
 		break;
 	case IEEE80211_AMPDU_TX_START:
 		ath_tx_aggr_start(sc, sta, tid, ssn);
-		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_STOP:
 		ath_tx_aggr_stop(sc, sta, tid);
-		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
 		ath_tx_aggr_resume(sc, sta, tid);
 		break;
 	default:
-		DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "Unknown AMPDU action\n");
 	}
 
 	return ret;
@@ -2796,64 +3171,6 @@ struct ieee80211_ops ath9k_ops = {
 	.rfkill_poll        = ath9k_rfkill_poll_state,
 };
 
-static struct {
-	u32 version;
-	const char * name;
-} ath_mac_bb_names[] = {
-	{ AR_SREV_VERSION_5416_PCI,	"5416" },
-	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
-	{ AR_SREV_VERSION_9100,		"9100" },
-	{ AR_SREV_VERSION_9160,		"9160" },
-	{ AR_SREV_VERSION_9280,		"9280" },
-	{ AR_SREV_VERSION_9285,		"9285" },
-	{ AR_SREV_VERSION_9287,         "9287" }
-};
-
-static struct {
-	u16 version;
-	const char * name;
-} ath_rf_names[] = {
-	{ 0,				"5133" },
-	{ AR_RAD5133_SREV_MAJOR,	"5133" },
-	{ AR_RAD5122_SREV_MAJOR,	"5122" },
-	{ AR_RAD2133_SREV_MAJOR,	"2133" },
-	{ AR_RAD2122_SREV_MAJOR,	"2122" }
-};
-
-/*
- * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
- */
-const char *
-ath_mac_bb_name(u32 mac_bb_version)
-{
-	int i;
-
-	for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
-		if (ath_mac_bb_names[i].version == mac_bb_version) {
-			return ath_mac_bb_names[i].name;
-		}
-	}
-
-	return "????";
-}
-
-/*
- * Return the RF name. "????" is returned if the RF is unknown.
- */
-const char *
-ath_rf_name(u16 rf_version)
-{
-	int i;
-
-	for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
-		if (ath_rf_names[i].version == rf_version) {
-			return ath_rf_names[i].name;
-		}
-	}
-
-	return "????";
-}
-
 static int __init ath9k_init(void)
 {
 	int error;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 903dd8ad9d43..5321f735e5a0 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -31,8 +31,9 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
 };
 
 /* return bus cachesize in 4B word units */
-static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz)
+static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
 {
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
 	u8 u8tmp;
 
 	pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
@@ -48,8 +49,9 @@ static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz)
 		*csz = DEFAULT_CACHELINE >> 2;   /* Use the default size */
 }
 
-static void ath_pci_cleanup(struct ath_softc *sc)
+static void ath_pci_cleanup(struct ath_common *common)
 {
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
 	struct pci_dev *pdev = to_pci_dev(sc->dev);
 
 	pci_iounmap(pdev, sc->mem);
@@ -57,9 +59,11 @@ static void ath_pci_cleanup(struct ath_softc *sc)
 	pci_release_region(pdev, 0);
 }
 
-static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
+static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 {
-	(void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+	struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+	common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
 
 	if (!ath9k_hw_wait(ah,
 			   AR_EEPROM_STATUS_DATA,
@@ -69,16 +73,34 @@ static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
 		return false;
 	}
 
-	*data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+	*data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
 		   AR_EEPROM_STATUS_DATA_VAL);
 
 	return true;
 }
 
-static struct ath_bus_ops ath_pci_bus_ops = {
+/*
+ * Bluetooth coexistance requires disabling ASPM.
+ */
+static void ath_pci_bt_coex_prep(struct ath_common *common)
+{
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+	struct pci_dev *pdev = to_pci_dev(sc->dev);
+	u8 aspm;
+
+	if (!pdev->is_pcie)
+		return;
+
+	pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
+	aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
+	pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
+}
+
+static const struct ath_bus_ops ath_pci_bus_ops = {
 	.read_cachesize = ath_pci_read_cachesize,
 	.cleanup = ath_pci_cleanup,
 	.eeprom_read = ath_pci_eeprom_read,
+	.bt_coex_prep = ath_pci_bt_coex_prep,
 };
 
 static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -92,6 +114,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	u32 val;
 	int ret = 0;
 	struct ath_hw *ah;
+	char hw_name[64];
 
 	if (pci_enable_device(pdev))
 		return -EIO;
@@ -177,10 +200,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	sc->hw = hw;
 	sc->dev = &pdev->dev;
 	sc->mem = mem;
-	sc->bus_ops = &ath_pci_bus_ops;
 
 	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
-	ret = ath_init_device(id->device, sc, subsysid);
+	ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to initialize device\n");
 		goto bad3;
@@ -197,14 +219,11 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	sc->irq = pdev->irq;
 
 	ah = sc->sc_ah;
+	ath9k_hw_name(ah, hw_name, sizeof(hw_name));
 	printk(KERN_INFO
-	       "%s: Atheros AR%s MAC/BB Rev:%x "
-	       "AR%s RF Rev:%x: mem=0x%lx, irq=%d\n",
+	       "%s: %s mem=0x%lx, irq=%d\n",
 	       wiphy_name(hw->wiphy),
-	       ath_mac_bb_name(ah->hw_version.macVersion),
-	       ah->hw_version.macRev,
-	       ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
-	       ah->hw_version.phyRev,
+	       hw_name,
 	       (unsigned long)mem, pdev->irq);
 
 	return 0;
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
index 63bf9a307c6a..c3b59390fe38 100644
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ b/drivers/net/wireless/ath/ath9k/phy.c
@@ -14,90 +14,70 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "ath9k.h"
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family starting with the AR9280
+ * have their analog front end radios, MAC/BB and host PCIe/USB interface
+ * embedded into a single chip and require less programming.
+ *
+ * The following single-chips exist with a respective embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9287 - 11n single-band 1x1 MIMO for USB
+ */
 
-void
-ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex, u32 freqIndex,
-		    int regWrites)
-{
-	REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
-}
+#include "hw.h"
 
-bool
-ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+/**
+ * ath9k_hw_write_regs - ??
+ *
+ * @ah: atheros hardware structure
+ * @freqIndex:
+ * @regWrites:
+ *
+ * Used both for chipsets with an external AR2133/AR5133 radio and for
+ * single-chip devices.
+ */
+void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
 {
-	u32 channelSel = 0;
-	u32 bModeSynth = 0;
-	u32 aModeRefSel = 0;
-	u32 reg32 = 0;
-	u16 freq;
-	struct chan_centers centers;
-
-	ath9k_hw_get_channel_centers(ah, chan, &centers);
-	freq = centers.synth_center;
-
-	if (freq < 4800) {
-		u32 txctl;
-
-		if (((freq - 2192) % 5) == 0) {
-			channelSel = ((freq - 672) * 2 - 3040) / 10;
-			bModeSynth = 0;
-		} else if (((freq - 2224) % 5) == 0) {
-			channelSel = ((freq - 704) * 2 - 3040) / 10;
-			bModeSynth = 1;
-		} else {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Invalid channel %u MHz\n", freq);
-			return false;
-		}
-
-		channelSel = (channelSel << 2) & 0xff;
-		channelSel = ath9k_hw_reverse_bits(channelSel, 8);
-
-		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
-		if (freq == 2484) {
-
-			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
-				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
-		} else {
-			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
-				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
-		}
-
-	} else if ((freq % 20) == 0 && freq >= 5120) {
-		channelSel =
-		    ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
-		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
-	} else if ((freq % 10) == 0) {
-		channelSel =
-		    ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
-		if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
-			aModeRefSel = ath9k_hw_reverse_bits(2, 2);
-		else
-			aModeRefSel = ath9k_hw_reverse_bits(1, 2);
-	} else if ((freq % 5) == 0) {
-		channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
-		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
-	} else {
-		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-			"Invalid channel %u MHz\n", freq);
-		return false;
-	}
-
-	reg32 =
-	    (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
-	    (1 << 5) | 0x1;
-
-	REG_WRITE(ah, AR_PHY(0x37), reg32);
-
-	ah->curchan = chan;
-	ah->curchan_rad_index = -1;
-
-	return true;
+	REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
 }
 
-void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
-				 struct ath9k_channel *chan)
+/**
+ * ath9k_hw_ar9280_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * This is the function to change channel on single-chip devices, that is,
+ * all devices after the AR9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ */
+int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
 {
 	u16 bMode, fracMode, aModeRefSel = 0;
 	u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
@@ -110,22 +90,34 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
 	reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
 	reg32 &= 0xc0000000;
 
-	if (freq < 4800) {
+	if (freq < 4800) { /* 2 GHz, fractional mode */
 		u32 txctl;
+		int regWrites = 0;
 
 		bMode = 1;
 		fracMode = 1;
 		aModeRefSel = 0;
 		channelSel = (freq * 0x10000) / 15;
 
-		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
-		if (freq == 2484) {
-
-			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
-				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+		if (AR_SREV_9287_11_OR_LATER(ah)) {
+			if (freq == 2484) {
+				/* Enable channel spreading for channel 14 */
+				REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+						1, regWrites);
+			} else {
+				REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+						1, regWrites);
+			}
 		} else {
-			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
-				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+			txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+			if (freq == 2484) {
+				/* Enable channel spreading for channel 14 */
+				REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+					  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+			} else {
+				REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+					  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+			}
 		}
 	} else {
 		bMode = 0;
@@ -143,10 +135,15 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
 		case 1:
 		default:
 			aModeRefSel = 0;
+			/*
+			 * Enable 2G (fractional) mode for channels
+			 * which are 5MHz spaced.
+			 */
 			fracMode = 1;
 			refDivA = 1;
 			channelSel = (freq * 0x8000) / 15;
 
+			/* RefDivA setting */
 			REG_RMW_FIELD(ah, AR_AN_SYNTH9,
 				      AR_AN_SYNTH9_REFDIVA, refDivA);
 
@@ -168,12 +165,284 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
 
 	ah->curchan = chan;
 	ah->curchan_rad_index = -1;
+
+	return 0;
+}
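
A worked check of the 2 GHz fractional path above; the 2412 MHz input is an illustrative value, and the helper only mirrors the channelSel computation so the kernel-doc formula can be verified by hand:

static inline u32 example_ar9280_2ghz_chansel(u16 freq_mhz)
{
	/*
	 * e.g. 2412 MHz: (2412 * 0x10000) / 15 = 10538188.
	 * Feeding that back into the kernel-doc formula with
	 * freq_ref = 40 MHz gives (3/4) * 40 * 10538188 / 2^17,
	 * which is ~2412 MHz again (up to integer truncation),
	 * so the synthesizer lands on the requested frequency.
	 */
	return ((u32)freq_mhz * 0x10000) / 15;
}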
+
+/**
+ * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * For single-chip solutions. Converts to the baseband spur frequency given
+ * the input channel frequency and computes the register settings below.
+ */
+void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+	int bb_spur = AR_NO_SPUR;
+	int freq;
+	int bin, cur_bin;
+	int bb_spur_off, spur_subchannel_sd;
+	int spur_freq_sd;
+	int spur_delta_phase;
+	int denominator;
+	int upper, lower, cur_vit_mask;
+	int tmp, newVal;
+	int i;
+	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+			  AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+	};
+	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+			 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+	};
+	int inc[4] = { 0, 100, 0, 0 };
+	struct chan_centers centers;
+
+	int8_t mask_m[123];
+	int8_t mask_p[123];
+	int8_t mask_amt;
+	int tmp_mask;
+	int cur_bb_spur;
+	bool is2GHz = IS_CHAN_2GHZ(chan);
+
+	memset(&mask_m, 0, sizeof(int8_t) * 123);
+	memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+	ath9k_hw_get_channel_centers(ah, chan, &centers);
+	freq = centers.synth_center;
+
+	ah->config.spurmode = SPUR_ENABLE_EEPROM;
+	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+		if (is2GHz)
+			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+		else
+			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+		if (AR_NO_SPUR == cur_bb_spur)
+			break;
+		cur_bb_spur = cur_bb_spur - freq;
+
+		if (IS_CHAN_HT40(chan)) {
+			if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+			    (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+				bb_spur = cur_bb_spur;
+				break;
+			}
+		} else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+			   (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+			bb_spur = cur_bb_spur;
+			break;
+		}
+	}
+
+	if (AR_NO_SPUR == bb_spur) {
+		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+		return;
+	} else {
+		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+	}
+
+	bin = bb_spur * 320;
+
+	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+	newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+			AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+			AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+			AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+	newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+		  AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+		  AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+		  AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+		  SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+	REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+	if (IS_CHAN_HT40(chan)) {
+		if (bb_spur < 0) {
+			spur_subchannel_sd = 1;
+			bb_spur_off = bb_spur + 10;
+		} else {
+			spur_subchannel_sd = 0;
+			bb_spur_off = bb_spur - 10;
+		}
+	} else {
+		spur_subchannel_sd = 0;
+		bb_spur_off = bb_spur;
+	}
+
+	if (IS_CHAN_HT40(chan))
+		spur_delta_phase =
+			((bb_spur * 262144) /
+			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+	else
+		spur_delta_phase =
+			((bb_spur * 524288) /
+			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+	denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+	spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+	newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+		  SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+		  SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+	REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+	newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+	REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+	cur_bin = -6000;
+	upper = bin + 100;
+	lower = bin - 100;
+
+	for (i = 0; i < 4; i++) {
+		int pilot_mask = 0;
+		int chan_mask = 0;
+		int bp = 0;
+		for (bp = 0; bp < 30; bp++) {
+			if ((cur_bin > lower) && (cur_bin < upper)) {
+				pilot_mask = pilot_mask | 0x1 << bp;
+				chan_mask = chan_mask | 0x1 << bp;
+			}
+			cur_bin += 100;
+		}
+		cur_bin += inc[i];
+		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+	}
+
+	cur_vit_mask = 6100;
+	upper = bin + 120;
+	lower = bin - 120;
+
+	for (i = 0; i < 123; i++) {
+		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+			/* workaround for gcc bug #37014 */
+			volatile int tmp_v = abs(cur_vit_mask - bin);
+
+			if (tmp_v < 75)
+				mask_amt = 1;
+			else
+				mask_amt = 0;
+			if (cur_vit_mask < 0)
+				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+			else
+				mask_p[cur_vit_mask / 100] = mask_amt;
+		}
+		cur_vit_mask -= 100;
+	}
+
+	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+		| (mask_m[48] << 26) | (mask_m[49] << 24)
+		| (mask_m[50] << 22) | (mask_m[51] << 20)
+		| (mask_m[52] << 18) | (mask_m[53] << 16)
+		| (mask_m[54] << 14) | (mask_m[55] << 12)
+		| (mask_m[56] << 10) | (mask_m[57] << 8)
+		| (mask_m[58] << 6) | (mask_m[59] << 4)
+		| (mask_m[60] << 2) | (mask_m[61] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+	tmp_mask = (mask_m[31] << 28)
+		| (mask_m[32] << 26) | (mask_m[33] << 24)
+		| (mask_m[34] << 22) | (mask_m[35] << 20)
+		| (mask_m[36] << 18) | (mask_m[37] << 16)
+		| (mask_m[48] << 14) | (mask_m[39] << 12)
+		| (mask_m[40] << 10) | (mask_m[41] << 8)
+		| (mask_m[42] << 6) | (mask_m[43] << 4)
+		| (mask_m[44] << 2) | (mask_m[45] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+		| (mask_m[18] << 26) | (mask_m[18] << 24)
+		| (mask_m[20] << 22) | (mask_m[20] << 20)
+		| (mask_m[22] << 18) | (mask_m[22] << 16)
+		| (mask_m[24] << 14) | (mask_m[24] << 12)
+		| (mask_m[25] << 10) | (mask_m[26] << 8)
+		| (mask_m[27] << 6) | (mask_m[28] << 4)
+		| (mask_m[29] << 2) | (mask_m[30] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+		| (mask_m[2] << 26) | (mask_m[3] << 24)
+		| (mask_m[4] << 22) | (mask_m[5] << 20)
+		| (mask_m[6] << 18) | (mask_m[7] << 16)
+		| (mask_m[8] << 14) | (mask_m[9] << 12)
+		| (mask_m[10] << 10) | (mask_m[11] << 8)
+		| (mask_m[12] << 6) | (mask_m[13] << 4)
+		| (mask_m[14] << 2) | (mask_m[15] << 0);
+	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+	tmp_mask = (mask_p[15] << 28)
+		| (mask_p[14] << 26) | (mask_p[13] << 24)
+		| (mask_p[12] << 22) | (mask_p[11] << 20)
+		| (mask_p[10] << 18) | (mask_p[9] << 16)
+		| (mask_p[8] << 14) | (mask_p[7] << 12)
+		| (mask_p[6] << 10) | (mask_p[5] << 8)
+		| (mask_p[4] << 6) | (mask_p[3] << 4)
+		| (mask_p[2] << 2) | (mask_p[1] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+	tmp_mask = (mask_p[30] << 28)
+		| (mask_p[29] << 26) | (mask_p[28] << 24)
+		| (mask_p[27] << 22) | (mask_p[26] << 20)
+		| (mask_p[25] << 18) | (mask_p[24] << 16)
+		| (mask_p[23] << 14) | (mask_p[22] << 12)
+		| (mask_p[21] << 10) | (mask_p[20] << 8)
+		| (mask_p[19] << 6) | (mask_p[18] << 4)
+		| (mask_p[17] << 2) | (mask_p[16] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+	tmp_mask = (mask_p[45] << 28)
+		| (mask_p[44] << 26) | (mask_p[43] << 24)
+		| (mask_p[42] << 22) | (mask_p[41] << 20)
+		| (mask_p[40] << 18) | (mask_p[39] << 16)
+		| (mask_p[38] << 14) | (mask_p[37] << 12)
+		| (mask_p[36] << 10) | (mask_p[35] << 8)
+		| (mask_p[34] << 6) | (mask_p[33] << 4)
+		| (mask_p[32] << 2) | (mask_p[31] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+		| (mask_p[59] << 26) | (mask_p[58] << 24)
+		| (mask_p[57] << 22) | (mask_p[56] << 20)
+		| (mask_p[55] << 18) | (mask_p[54] << 16)
+		| (mask_p[53] << 14) | (mask_p[52] << 12)
+		| (mask_p[51] << 10) | (mask_p[50] << 8)
+		| (mask_p[49] << 6) | (mask_p[48] << 4)
+		| (mask_p[47] << 2) | (mask_p[46] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
 }
 
-static void
-ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
-			   u32 numBits, u32 firstBit,
-			   u32 column)
+/* All code below is for non single-chip solutions */
+
+/**
+ * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
+ * @rfBuf:
+ * @reg32:
+ * @numBits:
+ * @firstBit:
+ * @column:
+ *
+ * Performs analog "swizzling" of parameters into their location.
+ * Used on external AR2133/AR5133 radios.
+ */
+static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+				       u32 numBits, u32 firstBit,
+				       u32 column)
 {
 	u32 tmp32, mask, arrayEntry, lastBit;
 	int32_t bitPosition, bitsLeft;
@@ -197,26 +466,466 @@ ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
 	}
 }
 
-bool
-ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
-		     u16 modesIndex)
+/*
+ * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
+ * rf_pwd_icsyndiv.
+ *
+ * Theoretical Rules:
+ *   if 2 GHz band
+ *      if forceBiasAuto
+ *         if synth_freq < 2412
+ *            bias = 0
+ *         else if 2412 <= synth_freq <= 2422
+ *            bias = 1
+ *         else // synth_freq > 2422
+ *            bias = 2
+ *      else if forceBias > 0
+ *         bias = forceBias & 7
+ *      else
+ *         no change, use value from ini file
+ *   else
+ *      no change, invalid band
+ *
+ *  1st Mod:
+ *    2422 also uses value of 2
+ *    <approved>
+ *
+ *  2nd Mod:
+ *    Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
+static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 tmp_reg;
+	int reg_writes = 0;
+	u32 new_bias = 0;
+
+	if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
+		return;
+	}
+
+	BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+	if (synth_freq < 2412)
+		new_bias = 0;
+	else if (synth_freq < 2422)
+		new_bias = 1;
+	else
+		new_bias = 2;
+
+	/* pre-reverse this field */
+	tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
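+	/* e.g. new_bias = 1 (0b001) pre-reverses to tmp_reg = 4 (0b100) */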
+
+	ath_print(common, ATH_DBG_CONFIG,
+		  "Force rf_pwd_icsyndiv to %1d on %4d\n",
+		  new_bias, synth_freq);
+
+	/* swizzle rf_pwd_icsyndiv */
+	ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+	/* write Bank 6 with new params */
+	REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
+}
+
+/**
+ * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan: the channel to tune to
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and
+ * programs the channel on the radio. Assumes writes have been enabled to the
+ * analog bus and that the bank6 register cache is in ah->analogBank6Data.
+ */
+int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 channelSel = 0;
+	u32 bModeSynth = 0;
+	u32 aModeRefSel = 0;
+	u32 reg32 = 0;
+	u16 freq;
+	struct chan_centers centers;
+
+	ath9k_hw_get_channel_centers(ah, chan, &centers);
+	freq = centers.synth_center;
+
+	if (freq < 4800) {
+		u32 txctl;
+
+		if (((freq - 2192) % 5) == 0) {
+			channelSel = ((freq - 672) * 2 - 3040) / 10;
+			bModeSynth = 0;
+		} else if (((freq - 2224) % 5) == 0) {
+			channelSel = ((freq - 704) * 2 - 3040) / 10;
+			bModeSynth = 1;
+		} else {
+			ath_print(common, ATH_DBG_FATAL,
+				  "Invalid channel %u MHz\n", freq);
+			return -EINVAL;
+		}
+
+		channelSel = (channelSel << 2) & 0xff;
+		channelSel = ath9k_hw_reverse_bits(channelSel, 8);
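+		/*
+		 * Worked example: for 2437 MHz (channel 6),
+		 * (2437 - 2192) % 5 == 0, so channelSel =
+		 * ((2437 - 672) * 2 - 3040) / 10 = 49, which becomes
+		 * (49 << 2) & 0xff = 0xc4 and bit-reverses to 0x23.
+		 */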
+
+		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+		if (freq == 2484) {
+
+			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+		} else {
+			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+		}
+
+	} else if ((freq % 20) == 0 && freq >= 5120) {
+		channelSel =
+		    ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+	} else if ((freq % 10) == 0) {
+		channelSel =
+		    ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+		if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+			aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+		else
+			aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+	} else if ((freq % 5) == 0) {
+		channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+	} else {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Invalid channel %u MHz\n", freq);
+		return -EINVAL;
+	}
+
+	ath9k_hw_force_bias(ah, freq);
+
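+	/*
+	 * Build the synthesizer programming word: channelSel occupies
+	 * bits 8 and up, aModeRefSel bits 2-3, bModeSynth bit 1, and
+	 * bits 0 and 5 are always set in the expression below.
+	 */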
+	reg32 =
+	    (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+	    (1 << 5) | 0x1;
+
+	REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+	ah->curchan = chan;
+	ah->curchan_rad_index = -1;
+
+	return 0;
+}
+
+/**
+ * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: the channel being operated on
+ *
+ * For non single-chip solutions. Converts the input channel frequency to a
+ * baseband spur frequency and computes the register settings below.
+ */
+void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+	int bb_spur = AR_NO_SPUR;
+	int bin, cur_bin;
+	int spur_freq_sd;
+	int spur_delta_phase;
+	int denominator;
+	int upper, lower, cur_vit_mask;
+	int tmp, new;
+	int i;
+	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+			  AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+	};
+	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+			 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+	};
+	int inc[4] = { 0, 100, 0, 0 };
+
+	int8_t mask_m[123];
+	int8_t mask_p[123];
+	int8_t mask_amt;
+	int tmp_mask;
+	int cur_bb_spur;
+	bool is2GHz = IS_CHAN_2GHZ(chan);
+
+	memset(&mask_m, 0, sizeof(int8_t) * 123);
+	memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+		if (AR_NO_SPUR == cur_bb_spur)
+			break;
+		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+			bb_spur = cur_bb_spur;
+			break;
+		}
+	}
+
+	if (AR_NO_SPUR == bb_spur)
+		return;
+
+	bin = bb_spur * 32;
+
+	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+	REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+	spur_delta_phase = ((bb_spur * 524288) / 100) &
+		AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
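+	/*
+	 * Worked example: bb_spur is in 0.1 MHz units here, so a +2 MHz
+	 * spur (bb_spur = 20) on a 2 GHz channel gives spur_delta_phase =
+	 * (20 * 524288) / 100 = 104857 and spur_freq_sd =
+	 * (20 * 2048) / 440 = 93.
+	 */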
+
+	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+	REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+	cur_bin = -6000;
+	upper = bin + 100;
+	lower = bin - 100;
+
+	for (i = 0; i < 4; i++) {
+		int pilot_mask = 0;
+		int chan_mask = 0;
+		int bp = 0;
+		for (bp = 0; bp < 30; bp++) {
+			if ((cur_bin > lower) && (cur_bin < upper)) {
+				pilot_mask = pilot_mask | 0x1 << bp;
+				chan_mask = chan_mask | 0x1 << bp;
+			}
+			cur_bin += 100;
+		}
+		cur_bin += inc[i];
+		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+	}
+
+	cur_vit_mask = 6100;
+	upper = bin + 120;
+	lower = bin - 120;
+
+	for (i = 0; i < 123; i++) {
+		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+			/* workaround for gcc bug #37014 */
+			volatile int tmp_v = abs(cur_vit_mask - bin);
+
+			if (tmp_v < 75)
+				mask_amt = 1;
+			else
+				mask_amt = 0;
+			if (cur_vit_mask < 0)
+				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+			else
+				mask_p[cur_vit_mask / 100] = mask_amt;
+		}
+		cur_vit_mask -= 100;
+	}
+
+	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+		| (mask_m[48] << 26) | (mask_m[49] << 24)
+		| (mask_m[50] << 22) | (mask_m[51] << 20)
+		| (mask_m[52] << 18) | (mask_m[53] << 16)
+		| (mask_m[54] << 14) | (mask_m[55] << 12)
+		| (mask_m[56] << 10) | (mask_m[57] << 8)
+		| (mask_m[58] << 6) | (mask_m[59] << 4)
+		| (mask_m[60] << 2) | (mask_m[61] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+	tmp_mask = (mask_m[31] << 28)
+		| (mask_m[32] << 26) | (mask_m[33] << 24)
+		| (mask_m[34] << 22) | (mask_m[35] << 20)
+		| (mask_m[36] << 18) | (mask_m[37] << 16)
+		| (mask_m[48] << 14) | (mask_m[39] << 12)
+		| (mask_m[40] << 10) | (mask_m[41] << 8)
+		| (mask_m[42] << 6) | (mask_m[43] << 4)
+		| (mask_m[44] << 2) | (mask_m[45] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+		| (mask_m[18] << 26) | (mask_m[18] << 24)
+		| (mask_m[20] << 22) | (mask_m[20] << 20)
+		| (mask_m[22] << 18) | (mask_m[22] << 16)
+		| (mask_m[24] << 14) | (mask_m[24] << 12)
+		| (mask_m[25] << 10) | (mask_m[26] << 8)
+		| (mask_m[27] << 6) | (mask_m[28] << 4)
+		| (mask_m[29] << 2) | (mask_m[30] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+		| (mask_m[2] << 26) | (mask_m[3] << 24)
+		| (mask_m[4] << 22) | (mask_m[5] << 20)
+		| (mask_m[6] << 18) | (mask_m[7] << 16)
+		| (mask_m[8] << 14) | (mask_m[9] << 12)
+		| (mask_m[10] << 10) | (mask_m[11] << 8)
+		| (mask_m[12] << 6) | (mask_m[13] << 4)
+		| (mask_m[14] << 2) | (mask_m[15] << 0);
+	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+	tmp_mask = (mask_p[15] << 28)
+		| (mask_p[14] << 26) | (mask_p[13] << 24)
+		| (mask_p[12] << 22) | (mask_p[11] << 20)
+		| (mask_p[10] << 18) | (mask_p[9] << 16)
+		| (mask_p[8] << 14) | (mask_p[7] << 12)
+		| (mask_p[6] << 10) | (mask_p[5] << 8)
+		| (mask_p[4] << 6) | (mask_p[3] << 4)
+		| (mask_p[2] << 2) | (mask_p[1] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+	tmp_mask = (mask_p[30] << 28)
+		| (mask_p[29] << 26) | (mask_p[28] << 24)
+		| (mask_p[27] << 22) | (mask_p[26] << 20)
+		| (mask_p[25] << 18) | (mask_p[24] << 16)
+		| (mask_p[23] << 14) | (mask_p[22] << 12)
+		| (mask_p[21] << 10) | (mask_p[20] << 8)
+		| (mask_p[19] << 6) | (mask_p[18] << 4)
+		| (mask_p[17] << 2) | (mask_p[16] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+	tmp_mask = (mask_p[45] << 28)
+		| (mask_p[44] << 26) | (mask_p[43] << 24)
+		| (mask_p[42] << 22) | (mask_p[41] << 20)
+		| (mask_p[40] << 18) | (mask_p[39] << 16)
+		| (mask_p[38] << 14) | (mask_p[37] << 12)
+		| (mask_p[36] << 10) | (mask_p[35] << 8)
+		| (mask_p[34] << 6) | (mask_p[33] << 4)
+		| (mask_p[32] << 2) | (mask_p[31] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+		| (mask_p[59] << 26) | (mask_p[58] << 24)
+		| (mask_p[57] << 22) | (mask_p[56] << 20)
+		| (mask_p[55] << 18) | (mask_p[54] << 16)
+		| (mask_p[53] << 14) | (mask_p[52] << 12)
+		| (mask_p[51] << 10) | (mask_p[50] << 8)
+		| (mask_p[49] << 6) | (mask_p[48] << 4)
+		| (mask_p[47] << 2) | (mask_p[46] << 0);
+	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
+/**
+ * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+#define ATH_ALLOC_BANK(bank, size) do { \
+		bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
+		if (!bank) { \
+			ath_print(common, ATH_DBG_FATAL, \
+				  "Cannot allocate RF banks\n"); \
+			return -ENOMEM; \
+		} \
+	} while (0);
+
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+	ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
+	ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
+	ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
+	ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
+	ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+	ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+	ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+	ATH_ALLOC_BANK(ah->addac5416_21,
+		       ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+	ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+	return 0;
+#undef ATH_ALLOC_BANK
+}
+
+
+/**
+ * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
+ * @ah: atheros hardware structure
+ *
+ * Frees the bank scratch buffers used with the external AR2133/AR5133 radios.
+ */
+void
+ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+#define ATH_FREE_BANK(bank) do { \
+		kfree(bank); \
+		bank = NULL; \
+	} while (0);
+
+	BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+	ATH_FREE_BANK(ah->analogBank0Data);
+	ATH_FREE_BANK(ah->analogBank1Data);
+	ATH_FREE_BANK(ah->analogBank2Data);
+	ATH_FREE_BANK(ah->analogBank3Data);
+	ATH_FREE_BANK(ah->analogBank6Data);
+	ATH_FREE_BANK(ah->analogBank6TPCData);
+	ATH_FREE_BANK(ah->analogBank7Data);
+	ATH_FREE_BANK(ah->addac5416_21);
+	ATH_FREE_BANK(ah->bank6Temp);
+
+#undef ATH_FREE_BANK
+}
+
+/**
+ * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan: the channel being operated on
+ * @modesIndex: mode index used to select per-mode values from the ini tables
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
+ */
+bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
+			  u16 modesIndex)
 {
 	u32 eepMinorRev;
 	u32 ob5GHz = 0, db5GHz = 0;
 	u32 ob2GHz = 0, db2GHz = 0;
 	int regWrites = 0;
 
+	/*
+	 * Software does not need to program bank data
+	 * for single chip devices, that is AR9280 or anything
+	 * after that.
+	 */
 	if (AR_SREV_9280_10_OR_LATER(ah))
 		return true;
 
+	/* Setup rf parameters */
 	eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
 
+	/* Setup Bank 0 Write */
 	RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
 
+	/* Setup Bank 1 Write */
 	RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
 
+	/* Setup Bank 2 Write */
 	RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
 
+	/* Setup Bank 3 Write */
 	RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
 		      modesIndex);
 	{
@@ -227,6 +936,7 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
 		}
 	}
 
+	/* Only the 5 or 2 GHz OB/DB need to be set for a mode */
 	if (eepMinorRev >= 2) {
 		if (IS_CHAN_2GHZ(chan)) {
 			ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
@@ -245,8 +955,10 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
 		}
 	}
 
+	/* Setup Bank 7 Write */
 	RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
 
+	/* Write Analog registers */
 	REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
 			   regWrites);
 	REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
@@ -262,137 +974,3 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
 
 	return true;
 }
-
-void
-ath9k_hw_rf_free(struct ath_hw *ah)
-{
-#define ATH_FREE_BANK(bank) do { \
-		kfree(bank); \
-		bank = NULL; \
-	} while (0);
-
-	ATH_FREE_BANK(ah->analogBank0Data);
-	ATH_FREE_BANK(ah->analogBank1Data);
-	ATH_FREE_BANK(ah->analogBank2Data);
-	ATH_FREE_BANK(ah->analogBank3Data);
-	ATH_FREE_BANK(ah->analogBank6Data);
-	ATH_FREE_BANK(ah->analogBank6TPCData);
-	ATH_FREE_BANK(ah->analogBank7Data);
-	ATH_FREE_BANK(ah->addac5416_21);
-	ATH_FREE_BANK(ah->bank6Temp);
-#undef ATH_FREE_BANK
-}
-
-bool ath9k_hw_init_rf(struct ath_hw *ah, int *status)
-{
-	if (!AR_SREV_9280_10_OR_LATER(ah)) {
-		ah->analogBank0Data =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank0.ia_rows), GFP_KERNEL);
-		ah->analogBank1Data =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank1.ia_rows), GFP_KERNEL);
-		ah->analogBank2Data =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank2.ia_rows), GFP_KERNEL);
-		ah->analogBank3Data =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank3.ia_rows), GFP_KERNEL);
-		ah->analogBank6Data =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank6.ia_rows), GFP_KERNEL);
-		ah->analogBank6TPCData =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank6TPC.ia_rows), GFP_KERNEL);
-		ah->analogBank7Data =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank7.ia_rows), GFP_KERNEL);
-
-		if (ah->analogBank0Data == NULL
-		    || ah->analogBank1Data == NULL
-		    || ah->analogBank2Data == NULL
-		    || ah->analogBank3Data == NULL
-		    || ah->analogBank6Data == NULL
-		    || ah->analogBank6TPCData == NULL
-		    || ah->analogBank7Data == NULL) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Cannot allocate RF banks\n");
-			*status = -ENOMEM;
-			return false;
-		}
-
-		ah->addac5416_21 =
-		    kzalloc((sizeof(u32) *
-			     ah->iniAddac.ia_rows *
-			     ah->iniAddac.ia_columns), GFP_KERNEL);
-		if (ah->addac5416_21 == NULL) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Cannot allocate addac5416_21\n");
-			*status = -ENOMEM;
-			return false;
-		}
-
-		ah->bank6Temp =
-		    kzalloc((sizeof(u32) *
-			     ah->iniBank6.ia_rows), GFP_KERNEL);
-		if (ah->bank6Temp == NULL) {
-			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
-				"Cannot allocate bank6Temp\n");
-			*status = -ENOMEM;
-			return false;
-		}
-	}
-
-	return true;
-}
-
-void
-ath9k_hw_decrease_chain_power(struct ath_hw *ah, struct ath9k_channel *chan)
-{
-	int i, regWrites = 0;
-	u32 bank6SelMask;
-	u32 *bank6Temp = ah->bank6Temp;
-
-	switch (ah->config.diversity_control) {
-	case ATH9K_ANT_FIXED_A:
-		bank6SelMask =
-		    (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
-			REDUCE_CHAIN_0 : REDUCE_CHAIN_1;
-		break;
-	case ATH9K_ANT_FIXED_B:
-		bank6SelMask =
-		    (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
-			REDUCE_CHAIN_1 : REDUCE_CHAIN_0;
-		break;
-	case ATH9K_ANT_VARIABLE:
-		return;
-		break;
-	default:
-		return;
-		break;
-	}
-
-	for (i = 0; i < ah->iniBank6.ia_rows; i++)
-		bank6Temp[i] = ah->analogBank6Data[i];
-
-	REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
-
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
-	ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
-
-	REG_WRITE_RF_ARRAY(&ah->iniBank6, bank6Temp, regWrites);
-
-	REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
-#ifdef ALTER_SWITCH
-	REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
-		  (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
-		  | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
-#endif
-}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index dfda6f444648..31de27dc0c4a 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,20 +17,23 @@
 #ifndef PHY_H
 #define PHY_H
 
-void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
-				 struct ath9k_channel
-				 *chan);
-bool ath9k_hw_set_channel(struct ath_hw *ah,
-			  struct ath9k_channel *chan);
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex,
-			 u32 freqIndex, int regWrites);
+/* Common between single chip and non single-chip solutions */
+void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites);
+
+/* Single chip radio settings */
+int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
+
+/* Routines below are for non single-chip solutions */
+int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
+
+int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
+void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
+
 bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
 			  struct ath9k_channel *chan,
 			  u16 modesIndex);
-void ath9k_hw_decrease_chain_power(struct ath_hw *ah,
-				   struct ath9k_channel *chan);
-bool ath9k_hw_init_rf(struct ath_hw *ah,
-		      int *status);
 
 #define AR_PHY_BASE     0x9800
 #define AR_PHY(_n)      (AR_PHY_BASE + ((_n)<<2))
@@ -45,6 +48,7 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
 #define AR_PHY_FC_DYN2040_EN        0x00000004
 #define AR_PHY_FC_DYN2040_PRI_ONLY  0x00000008
 #define AR_PHY_FC_DYN2040_PRI_CH    0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
 #define AR_PHY_FC_DYN2040_EXT_CH    0x00000020
 #define AR_PHY_FC_HT_EN             0x00000040
 #define AR_PHY_FC_SHORT_GI_40       0x00000080
@@ -185,8 +189,20 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
 #define AR_PHY_PLL_CTL_44_2133  0xeb
 #define AR_PHY_PLL_CTL_40_2133  0xea
 
-#define AR_PHY_SPECTRAL_SCAN		0x9912
-#define AR_PHY_SPECTRAL_SCAN_ENABLE	0x1
+#define AR_PHY_SPECTRAL_SCAN			0x9910  /* AR9280 spectral scan configuration register */
+#define	AR_PHY_SPECTRAL_SCAN_ENABLE		0x1
+#define AR_PHY_SPECTRAL_SCAN_ENA		0x00000001  /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ENA_S		0  /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE		0x00000002  /* Activate spectral scan, reg 68, bit 1 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S		1  /* Activate spectral scan, reg 68, bit 1 */
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD		0x000000F0  /* Interval for FFT reports, reg 68, bits 4-7 */
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S	4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD		0x0000FF00  /* Interval for FFT reports, reg 68, bits 8-15 */
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S		8
+#define AR_PHY_SPECTRAL_SCAN_COUNT		0x00FF0000  /* Number of reports, reg 68, bits 16-23 */
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S		16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT	0x01000000  /* Short repeat, reg 68, bit 24 */
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S	24  /* Short repeat, reg 68, bit 24 */
 
 #define AR_PHY_RX_DELAY           0x9914
 #define AR_PHY_SEARCH_START_DELAY 0x9918
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 1895d63aad0a..c915954d4d5b 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -19,133 +19,92 @@
 
 static const struct ath_rate_table ar5416_11na_ratetable = {
 	42,
+	8, /* MCS start */
 	{
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-			5400, 0x0b, 0x00, 12,
-			0, 0, 0, 0, 0, 0 },
+			5400, 0, 12, 0, 0, 0, 0, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-			7800,  0x0f, 0x00, 18,
-			0, 1, 1, 1, 1, 0 },
+			7800,  1, 18, 0, 1, 1, 1, 1 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-			10000, 0x0a, 0x00, 24,
-			2, 2, 2, 2, 2, 0 },
+			10000, 2, 24, 2, 2, 2, 2, 2 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-			13900, 0x0e, 0x00, 36,
-			2,  3, 3, 3, 3, 0 },
+			13900, 3, 36, 2, 3, 3, 3, 3 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-			17300, 0x09, 0x00, 48,
-			4,  4, 4, 4, 4, 0 },
+			17300, 4, 48, 4, 4, 4, 4, 4 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-			23000, 0x0d, 0x00, 72,
-			4,  5, 5, 5, 5, 0 },
+			23000, 5, 72, 4, 5, 5, 5, 5 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-			27400, 0x08, 0x00, 96,
-			4,  6, 6, 6, 6, 0 },
+			27400, 6, 96, 4, 6, 6, 6, 6 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-			29300, 0x0c, 0x00, 108,
-			4,  7, 7, 7, 7, 0 },
+			29300, 7, 108, 4, 7, 7, 7, 7 },
 		{ VALID_2040, VALID_2040, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
-			6400, 0x80, 0x00, 0,
-			0, 8, 24, 8, 24, 3216 },
+			6400, 0, 0, 0, 8, 24, 8, 24 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
-			12700, 0x81, 0x00, 1,
-			2, 9, 25, 9, 25, 6434 },
+			12700, 1, 1, 2, 9, 25, 9, 25 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
-			18800, 0x82, 0x00, 2,
-			2, 10, 26, 10, 26, 9650 },
+			18800, 2, 2, 2, 10, 26, 10, 26 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
-			25000, 0x83, 0x00, 3,
-			4,  11, 27, 11, 27, 12868 },
+			25000, 3, 3, 4, 11, 27, 11, 27 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
-			36700, 0x84, 0x00, 4,
-			4,  12, 28, 12, 28, 19304 },
+			36700, 4, 4, 4, 12, 28, 12, 28 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
-			48100, 0x85, 0x00, 5,
-			4,  13, 29, 13, 29, 25740 },
+			48100, 5, 5, 4, 13, 29, 13, 29 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
-			53500, 0x86, 0x00, 6,
-			4,  14, 30, 14, 30,  28956 },
+			53500, 6, 6, 4, 14, 30, 14, 30 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
-			59000, 0x87, 0x00, 7,
-			4,  15, 31, 15, 32, 32180 },
+			59000, 7, 7, 4, 15, 31, 15, 32 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
-			12700, 0x88, 0x00,
-			8, 3, 16, 33, 16, 33, 6430 },
+			12700, 8, 8, 3, 16, 33, 16, 33 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
-			24800, 0x89, 0x00, 9,
-			2, 17, 34, 17, 34, 12860 },
+			24800, 9, 9, 2, 17, 34, 17, 34 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
-			36600, 0x8a, 0x00, 10,
-			2, 18, 35, 18, 35, 19300 },
+			36600, 10, 10, 2, 18, 35, 18, 35 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
-			48100, 0x8b, 0x00, 11,
-			4,  19, 36, 19, 36, 25736 },
+			48100, 11, 11, 4, 19, 36, 19, 36 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
-			69500, 0x8c, 0x00, 12,
-			4,  20, 37, 20, 37, 38600 },
+			69500, 12, 12, 4, 20, 37, 20, 37 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
-			89500, 0x8d, 0x00, 13,
-			4,  21, 38, 21, 38, 51472 },
+			89500, 13, 13, 4, 21, 38, 21, 38 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
-			98900, 0x8e, 0x00, 14,
-			4,  22, 39, 22, 39, 57890 },
+			98900, 14, 14, 4, 22, 39, 22, 39 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
-			108300, 0x8f, 0x00, 15,
-			4,  23, 40, 23, 41, 64320 },
+			108300, 15, 15, 4, 23, 40, 23, 41 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
-			13200, 0x80, 0x00, 0,
-			0, 8, 24, 24, 24, 6684 },
+			13200, 0, 0, 0, 8, 24, 24, 24 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
-			25900, 0x81, 0x00, 1,
-			2, 9, 25, 25, 25, 13368 },
+			25900, 1, 1, 2, 9, 25, 25, 25 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
-			38600, 0x82, 0x00, 2,
-			2, 10, 26, 26, 26, 20052 },
+			38600, 2, 2, 2, 10, 26, 26, 26 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
-			49800, 0x83, 0x00, 3,
-			4,  11, 27, 27, 27, 26738 },
+			49800, 3, 3, 4, 11, 27, 27, 27 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
-			72200, 0x84, 0x00, 4,
-			4,  12, 28, 28, 28, 40104 },
+			72200, 4, 4, 4, 12, 28, 28, 28 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
-			92900, 0x85, 0x00, 5,
-			4,  13, 29, 29, 29, 53476 },
+			92900, 5, 5, 4, 13, 29, 29, 29 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
-			102700, 0x86, 0x00, 6,
-			4,  14, 30, 30, 30, 60156 },
+			102700, 6, 6, 4, 14, 30, 30, 30 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
-			112000, 0x87, 0x00, 7,
-			4,  15, 31, 32, 32, 66840 },
+			112000, 7, 7, 4, 15, 31, 32, 32 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
-			122000, 0x87, 0x00, 7,
-			4,  15, 31, 32, 32, 74200 },
+			122000, 7, 7, 4, 15, 31, 32, 32 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
-			25800, 0x88, 0x00, 8,
-			0, 16, 33, 33, 33, 13360 },
+			25800, 8, 8, 0, 16, 33, 33, 33 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
-			49800, 0x89, 0x00, 9,
-			2, 17, 34, 34, 34, 26720 },
+			49800, 9, 9, 2, 17, 34, 34, 34 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
-			71900, 0x8a, 0x00, 10,
-			2, 18, 35, 35, 35, 40080 },
+			71900, 10, 10, 2, 18, 35, 35, 35 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
-			92500, 0x8b, 0x00, 11,
-			4,  19, 36, 36, 36, 53440 },
+			92500, 11, 11, 4, 19, 36, 36, 36 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
-			130300, 0x8c, 0x00, 12,
-			4,  20, 37, 37, 37, 80160 },
+			130300, 12, 12, 4, 20, 37, 37, 37 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
-			162800, 0x8d, 0x00, 13,
-			4,  21, 38, 38, 38, 106880 },
+			162800, 13, 13, 4, 21, 38, 38, 38 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
-			178200, 0x8e, 0x00, 14,
-			4,  22, 39, 39, 39, 120240 },
+			178200, 14, 14, 4, 22, 39, 39, 39 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
-			192100, 0x8f, 0x00, 15,
-			4,  23, 40, 41, 41, 133600 },
+			192100, 15, 15, 4, 23, 40, 41, 41 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
-			207000, 0x8f, 0x00, 15,
-			4,  23, 40, 41, 41, 148400 },
+			207000, 15, 15, 4, 23, 40, 41, 41 },
 	},
 	50,  /* probe interval */
 	WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -156,177 +115,125 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
 
 static const struct ath_rate_table ar5416_11ng_ratetable = {
 	46,
+	12, /* MCS start */
 	{
 		{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-			900, 0x1b, 0x00, 2,
-			0, 0, 0, 0, 0, 0 },
+			900, 0, 2, 0, 0, 0, 0, 0 },
 		{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-			1900, 0x1a, 0x04, 4,
-			1, 1, 1, 1, 1, 0 },
+			1900, 1, 4, 1, 1, 1, 1, 1 },
 		{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-			4900, 0x19, 0x04, 11,
-			2, 2, 2, 2, 2, 0 },
+			4900, 2, 11, 2, 2, 2, 2, 2 },
 		{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-			8100, 0x18, 0x04, 22,
-			3, 3, 3, 3, 3, 0 },
+			8100, 3, 22, 3, 3, 3, 3, 3 },
 		{ INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-			5400, 0x0b, 0x00, 12,
-			4, 4, 4, 4, 4, 0 },
+			5400, 4, 12, 4, 4, 4, 4, 4 },
 		{ INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-			7800, 0x0f, 0x00, 18,
-			4, 5, 5, 5, 5, 0 },
+			7800, 5, 18, 4, 5, 5, 5, 5 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-			10100, 0x0a, 0x00, 24,
-			6, 6, 6, 6, 6, 0 },
+			10100, 6, 24, 6, 6, 6, 6, 6 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-			14100,  0x0e, 0x00, 36,
-			6, 7, 7, 7, 7, 0 },
+			14100, 7, 36, 6, 7, 7, 7, 7 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-			17700, 0x09, 0x00, 48,
-			8,  8, 8, 8, 8, 0 },
+			17700, 8, 48, 8, 8, 8, 8, 8 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-			23700, 0x0d, 0x00, 72,
-			8,  9, 9, 9, 9, 0 },
+			23700, 9, 72, 8, 9, 9, 9, 9 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-			27400, 0x08, 0x00, 96,
-			8,  10, 10, 10, 10, 0 },
+			27400, 10, 96, 8, 10, 10, 10, 10 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-			30900, 0x0c, 0x00, 108,
-			8,  11, 11, 11, 11, 0 },
+			30900, 11, 108, 8, 11, 11, 11, 11 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
-			6400, 0x80, 0x00, 0,
-			4, 12, 28, 12, 28, 3216 },
+			6400, 0, 0, 4, 12, 28, 12, 28 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
-			12700, 0x81, 0x00, 1,
-			6, 13, 29, 13, 29, 6434 },
+			12700, 1, 1, 6, 13, 29, 13, 29 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
-			18800, 0x82, 0x00, 2,
-			6, 14, 30, 14, 30, 9650 },
+			18800, 2, 2, 6, 14, 30, 14, 30 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
-			25000, 0x83, 0x00, 3,
-			8,  15, 31, 15, 31, 12868 },
+			25000, 3, 3, 8, 15, 31, 15, 31 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
-			36700, 0x84, 0x00, 4,
-			8,  16, 32, 16, 32, 19304 },
+			36700, 4, 4, 8, 16, 32, 16, 32 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
-			48100, 0x85, 0x00, 5,
-			8,  17, 33, 17, 33, 25740 },
+			48100, 5, 5, 8, 17, 33, 17, 33 },
 		{ INVALID,  VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
-			53500, 0x86, 0x00, 6,
-			8,  18, 34, 18, 34, 28956 },
+			53500, 6, 6, 8, 18, 34, 18, 34 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
-			59000, 0x87, 0x00, 7,
-			8,  19, 35, 19, 36, 32180 },
+			59000, 7, 7, 8, 19, 35, 19, 36 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
-			12700, 0x88, 0x00, 8,
-			4, 20, 37, 20, 37, 6430 },
+			12700, 8, 8, 4, 20, 37, 20, 37 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
-			24800, 0x89, 0x00, 9,
-			6, 21, 38, 21, 38, 12860 },
+			24800, 9, 9, 6, 21, 38, 21, 38 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
-			36600, 0x8a, 0x00, 10,
-			6, 22, 39, 22, 39, 19300 },
+			36600, 10, 10, 6, 22, 39, 22, 39 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
-			48100, 0x8b, 0x00, 11,
-			8,  23, 40, 23, 40, 25736 },
+			48100, 11, 11, 8, 23, 40, 23, 40 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
-			69500, 0x8c, 0x00, 12,
-			8,  24, 41, 24, 41, 38600 },
+			69500, 12, 12, 8, 24, 41, 24, 41 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
-			89500, 0x8d, 0x00, 13,
-			8,  25, 42, 25, 42, 51472 },
+			89500, 13, 13, 8, 25, 42, 25, 42 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
-			98900, 0x8e, 0x00, 14,
-			8,  26, 43, 26, 44, 57890 },
+			98900, 14, 14, 8, 26, 43, 26, 44 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
-			108300, 0x8f, 0x00, 15,
-			8,  27, 44, 27, 45, 64320 },
+			108300, 15, 15, 8, 27, 44, 27, 45 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
-			13200, 0x80, 0x00, 0,
-			8, 12, 28, 28, 28, 6684 },
+			13200, 0, 0, 8, 12, 28, 28, 28 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
-			25900, 0x81, 0x00, 1,
-			8, 13, 29, 29, 29, 13368 },
+			25900, 1, 1, 8, 13, 29, 29, 29 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
-			38600, 0x82, 0x00, 2,
-			8, 14, 30, 30, 30, 20052 },
+			38600, 2, 2, 8, 14, 30, 30, 30 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
-			49800, 0x83, 0x00, 3,
-			8,  15, 31, 31, 31, 26738 },
+			49800, 3, 3, 8,  15, 31, 31, 31 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
-			72200, 0x84, 0x00, 4,
-			8,  16, 32, 32, 32, 40104 },
+			72200, 4, 4, 8, 16, 32, 32, 32 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
-			92900, 0x85, 0x00, 5,
-			8,  17, 33, 33, 33, 53476 },
+			92900, 5, 5, 8, 17, 33, 33, 33 },
 		{ INVALID,  VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
-			102700, 0x86, 0x00, 6,
-			8,  18, 34, 34, 34, 60156 },
+			102700, 6, 6, 8, 18, 34, 34, 34 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
-			112000, 0x87, 0x00, 7,
-			8,  19, 35, 36, 36, 66840 },
+			112000, 7, 7, 8, 19, 35, 36, 36 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
-			122000, 0x87, 0x00, 7,
-			8,  19, 35, 36, 36, 74200 },
+			122000, 7, 7, 8, 19, 35, 36, 36 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
-			25800, 0x88, 0x00, 8,
-			8, 20, 37, 37, 37, 13360 },
+			25800, 8, 8, 8, 20, 37, 37, 37 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
-			49800, 0x89, 0x00, 9,
-			8, 21, 38, 38, 38, 26720 },
+			49800, 9, 9, 8, 21, 38, 38, 38 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
-			71900, 0x8a, 0x00, 10,
-			8, 22, 39, 39, 39, 40080 },
+			71900, 10, 10, 8, 22, 39, 39, 39 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
-			92500, 0x8b, 0x00, 11,
-			8,  23, 40, 40, 40, 53440 },
+			92500, 11, 11, 8, 23, 40, 40, 40 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
-			130300, 0x8c, 0x00, 12,
-			8,  24, 41, 41, 41, 80160 },
+			130300, 12, 12, 8, 24, 41, 41, 41 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
-			162800, 0x8d, 0x00, 13,
-			8,  25, 42, 42, 42, 106880 },
+			162800, 13, 13, 8, 25, 42, 42, 42 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
-			178200, 0x8e, 0x00, 14,
-			8,  26, 43, 43, 43, 120240 },
+			178200, 14, 14, 8, 26, 43, 43, 43 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
-			192100, 0x8f, 0x00, 15,
-			8,  27, 44, 45, 45, 133600 },
+			192100, 15, 15, 8, 27, 44, 45, 45 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
-			207000, 0x8f, 0x00, 15,
-			8,  27, 44, 45, 45, 148400 },
-		},
+			207000, 15, 15, 8, 27, 44, 45, 45 },
+	},
 	50,  /* probe interval */
 	WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
 };
 
 static const struct ath_rate_table ar5416_11a_ratetable = {
 	8,
+	0,
 	{
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-			5400, 0x0b, 0x00, (0x80|12),
-			0, 0, 0 },
+			5400, 0, 12, 0, 0, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-			7800, 0x0f, 0x00, 18,
-			0, 1, 0 },
+			7800,  1, 18, 0, 1, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-			10000, 0x0a, 0x00, (0x80|24),
-			2, 2, 0 },
+			10000, 2, 24, 2, 2, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-			13900, 0x0e, 0x00, 36,
-			2, 3, 0 },
+			13900, 3, 36, 2, 3, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-			17300, 0x09, 0x00, (0x80|48),
-			4,  4, 0 },
+			17300, 4, 48, 4, 4, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-			23000, 0x0d, 0x00, 72,
-			4,  5, 0 },
+			23000, 5, 72, 4, 5, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-			27400, 0x08, 0x00, 96,
-			4,  6, 0 },
+			27400, 6, 96, 4, 6, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-			29300, 0x0c, 0x00, 108,
-			4,  7, 0 },
+			29300, 7, 108, 4, 7, 0 },
 	},
 	50,  /* probe interval */
 	0,   /* Phy rates allowed initially */
@@ -334,48 +241,51 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
 
 static const struct ath_rate_table ar5416_11g_ratetable = {
 	12,
+	0,
 	{
 		{ VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-			900, 0x1b, 0x00, 2,
-			0, 0, 0 },
+			900, 0, 2, 0, 0, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-			1900, 0x1a, 0x04, 4,
-			1, 1, 0 },
+			1900, 1, 4, 1, 1, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-			4900, 0x19, 0x04, 11,
-			2, 2, 0 },
+			4900, 2, 11, 2, 2, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-			8100, 0x18, 0x04, 22,
-			3, 3, 0 },
+			8100, 3, 22, 3, 3, 0 },
 		{ INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-			5400, 0x0b, 0x00, 12,
-			4, 4, 0 },
+			5400, 4, 12, 4, 4, 0 },
 		{ INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-			7800, 0x0f, 0x00, 18,
-			4, 5, 0 },
+			7800, 5, 18, 4, 5, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-			10000, 0x0a, 0x00, 24,
-			6, 6, 0 },
+			10000, 6, 24, 6, 6, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-			13900, 0x0e, 0x00, 36,
-			6, 7, 0 },
+			13900, 7, 36, 6, 7, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-			17300, 0x09, 0x00, 48,
-			8,  8, 0 },
+			17300, 8, 48, 8, 8, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-			23000, 0x0d, 0x00, 72,
-			8,  9, 0 },
+			23000, 9, 72, 8, 9, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-			27400, 0x08, 0x00, 96,
-			8,  10, 0 },
+			27400, 10, 96, 8, 10, 0 },
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-			29300, 0x0c, 0x00, 108,
-			8,  11, 0 },
+			29300, 11, 108, 8, 11, 0 },
 	},
 	50,  /* probe interval */
 	0,   /* Phy rates allowed initially */
 };
 
+static const struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX] = {
+	[ATH9K_MODE_11A] = &ar5416_11a_ratetable,
+	[ATH9K_MODE_11G] = &ar5416_11g_ratetable,
+	[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable,
+	[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable,
+	[ATH9K_MODE_11NA_HT40PLUS] = &ar5416_11na_ratetable,
+	[ATH9K_MODE_11NA_HT40MINUS] = &ar5416_11na_ratetable,
+	[ATH9K_MODE_11NG_HT40PLUS] = &ar5416_11ng_ratetable,
+	[ATH9K_MODE_11NG_HT40MINUS] = &ar5416_11ng_ratetable,
+};
+
+static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
+				struct ieee80211_tx_rate *rate);
+
 static inline int8_t median(int8_t a, int8_t b, int8_t c)
 {
 	if (a >= b) {
@@ -425,7 +335,7 @@ static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
 static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv,
 					   u8 index, int valid_tx_rate)
 {
-	ASSERT(index <= ath_rc_priv->rate_table_size);
+	BUG_ON(index > ath_rc_priv->rate_table_size);
 	ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0;
 }
 
@@ -534,7 +444,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
 			 * capflag matches one of the validity
 			 * (VALID/VALID_20/VALID_40) flags */
 
-			if (((rate & 0x7F) == (dot11rate & 0x7F)) &&
+			if ((rate == dot11rate) &&
 			    ((valid & WLAN_RC_CAP_MODE(capflag)) ==
 			     WLAN_RC_CAP_MODE(capflag)) &&
 			    !WLAN_RC_PHY_HT(phy)) {
@@ -576,8 +486,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
 			u8 rate = rateset->rs_rates[i];
 			u8 dot11rate = rate_table->info[j].dot11rate;
 
-			if (((rate & 0x7F) != (dot11rate & 0x7F)) ||
-			    !WLAN_RC_PHY_HT(phy) ||
+			if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
 			    !WLAN_RC_PHY_HT_VALID(valid, capflag))
 				continue;
 
@@ -696,18 +605,20 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
 				   u8 tries, u8 rix, int rtsctsenable)
 {
 	rate->count = tries;
-	rate->idx = rix;
+	rate->idx = rate_table->info[rix].ratecode;
 
 	if (txrc->short_preamble)
 		rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
 	if (txrc->rts || rtsctsenable)
 		rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
-	if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
-		rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-	if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
-		rate->flags |= IEEE80211_TX_RC_SHORT_GI;
-	if (WLAN_RC_PHY_HT(rate_table->info[rix].phy))
+
+	if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
 		rate->flags |= IEEE80211_TX_RC_MCS;
+		if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
+			rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
+			rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+	}
 }
 
 static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
@@ -720,7 +631,7 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
 	/* get the cix for the lowest valid rix */
 	for (i = 3; i >= 0; i--) {
 		if (rates[i].count && (rates[i].idx >= 0)) {
-			rix = rates[i].idx;
+			rix = ath_rc_get_rateindex(rate_table, &rates[i]);
 			break;
 		}
 	}
@@ -859,12 +770,12 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
 static bool ath_rc_update_per(struct ath_softc *sc,
 			      const struct ath_rate_table *rate_table,
 			      struct ath_rate_priv *ath_rc_priv,
-			      struct ath_tx_info_priv *tx_info_priv,
+			      struct ieee80211_tx_info *tx_info,
 			      int tx_rate, int xretries, int retries,
 			      u32 now_msec)
 {
 	bool state_change = false;
-	int count;
+	int count, n_bad_frames;
 	u8 last_per;
 	static u32 nretry_to_per_lookup[10] = {
 		100 * 0 / 1,
@@ -880,6 +791,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
 	};
 
 	last_per = ath_rc_priv->per[tx_rate];
+	n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
 
 	if (xretries) {
 		if (xretries == 1) {
@@ -907,7 +819,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
 		if (retries >= count)
 			retries = count - 1;
 
-		if (tx_info_priv->n_bad_frames) {
+		if (n_bad_frames) {
 			/* new_PER = 7/8*old_PER + 1/8*(currentPER)
 			 * Assuming that n_frames is not 0.  The current PER
 			 * from the retries is 100 * retries / (retries+1),
@@ -920,14 +832,14 @@ static bool ath_rc_update_per(struct ath_softc *sc,
 			 * the above PER.  The expression below is a
 			 * simplified version of the sum of these two terms.
 			 */
-			if (tx_info_priv->n_frames > 0) {
-				int n_frames, n_bad_frames;
+			if (tx_info->status.ampdu_len > 0) {
+				int n_frames, n_bad_tries;
 				u8 cur_per, new_per;
 
-				n_bad_frames = retries * tx_info_priv->n_frames +
-					tx_info_priv->n_bad_frames;
-				n_frames = tx_info_priv->n_frames * (retries + 1);
-				cur_per = (100 * n_bad_frames / n_frames) >> 3;
+				n_bad_tries = retries * tx_info->status.ampdu_len +
+					n_bad_frames;
+				n_frames = tx_info->status.ampdu_len * (retries + 1);
+				cur_per = (100 * n_bad_tries / n_frames) >> 3;
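+				/*
+				 * e.g. ampdu_len = 10, n_bad_frames = 2 and
+				 * retries = 1 gives n_bad_tries = 12,
+				 * n_frames = 20 and cur_per =
+				 * (100 * 12 / 20) >> 3 = 7.
+				 */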
 				new_per = (u8)(last_per - (last_per >> 3) + cur_per);
 				ath_rc_priv->per[tx_rate] = new_per;
 			}
@@ -943,8 +855,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
 		 * this was a probe.  Otherwise, ignore the probe.
 		 */
 		if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
-			if (retries > 0 || 2 * tx_info_priv->n_bad_frames >
-				tx_info_priv->n_frames) {
+			if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
 				/*
 				 * Since we probed with just a single attempt,
 				 * any retries means the probe failed.  Also,
@@ -1003,7 +914,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
 
 static void ath_rc_update_ht(struct ath_softc *sc,
 			     struct ath_rate_priv *ath_rc_priv,
-			     struct ath_tx_info_priv *tx_info_priv,
+			     struct ieee80211_tx_info *tx_info,
 			     int tx_rate, int xretries, int retries)
 {
 	u32 now_msec = jiffies_to_msecs(jiffies);
@@ -1020,7 +931,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
 
 	/* Update PER first */
 	state_change = ath_rc_update_per(sc, rate_table, ath_rc_priv,
-					 tx_info_priv, tx_rate, xretries,
+					 tx_info, tx_rate, xretries,
 					 retries, now_msec);
 
 	/*
@@ -1080,15 +991,19 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
 {
 	int rix;
 
+	if (!(rate->flags & IEEE80211_TX_RC_MCS))
+		return rate->idx;
+
+	rix = rate->idx + rate_table->mcs_start;
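+	/*
+	 * e.g. the 11na table above has mcs_start = 8, so MCS 0 from
+	 * mac80211 maps to info[8], the first HT20 single-stream entry.
+	 */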
 	if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
 	    (rate->flags & IEEE80211_TX_RC_SHORT_GI))
-		rix = rate_table->info[rate->idx].ht_index;
+		rix = rate_table->info[rix].ht_index;
 	else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-		rix = rate_table->info[rate->idx].sgi_index;
+		rix = rate_table->info[rix].sgi_index;
 	else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-		rix = rate_table->info[rate->idx].cw40index;
+		rix = rate_table->info[rix].cw40index;
 	else
-		rix = rate_table->info[rate->idx].base_index;
+		rix = rate_table->info[rix].base_index;
 
 	return rix;
 }
@@ -1098,7 +1013,6 @@ static void ath_rc_tx_status(struct ath_softc *sc,
 			     struct ieee80211_tx_info *tx_info,
 			     int final_ts_idx, int xretries, int long_retry)
 {
-	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
 	const struct ath_rate_table *rate_table;
 	struct ieee80211_tx_rate *rates = tx_info->status.rates;
 	u8 flags;
@@ -1124,9 +1038,8 @@ static void ath_rc_tx_status(struct ath_softc *sc,
 					return;
 
 				rix = ath_rc_get_rateindex(rate_table, &rates[i]);
-				ath_rc_update_ht(sc, ath_rc_priv,
-						tx_info_priv, rix,
-						xretries ? 1 : 2,
+				ath_rc_update_ht(sc, ath_rc_priv, tx_info,
+						rix, xretries ? 1 : 2,
 						rates[i].count);
 			}
 		}
@@ -1149,8 +1062,7 @@ static void ath_rc_tx_status(struct ath_softc *sc,
 		return;
 
 	rix = ath_rc_get_rateindex(rate_table, &rates[i]);
-	ath_rc_update_ht(sc, ath_rc_priv, tx_info_priv, rix,
-			 xretries, long_retry);
+	ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
 }
 
 static const
@@ -1160,6 +1072,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
 					     bool is_cw_40)
 {
 	int mode = 0;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
 	switch(band) {
 	case IEEE80211_BAND_2GHZ:
@@ -1177,14 +1090,17 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
 			mode = ATH9K_MODE_11NA_HT40PLUS;
 		break;
 	default:
-		DPRINTF(sc, ATH_DBG_CONFIG, "Invalid band\n");
+		ath_print(common, ATH_DBG_CONFIG, "Invalid band\n");
 		return NULL;
 	}
 
 	BUG_ON(mode >= ATH9K_MODE_MAX);
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Choosing rate table for mode: %d\n", mode);
-	return sc->hw_rate_table[mode];
+	ath_print(common, ATH_DBG_CONFIG,
+		  "Choosing rate table for mode: %d\n", mode);
+
+	sc->cur_rate_mode = mode;
+	return hw_rate_table[mode];
 }
 
 static void ath_rc_init(struct ath_softc *sc,
@@ -1194,14 +1110,10 @@ static void ath_rc_init(struct ath_softc *sc,
 			const struct ath_rate_table *rate_table)
 {
 	struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates;
 	u8 i, j, k, hi = 0, hthi = 0;
 
-	if (!rate_table) {
-		DPRINTF(sc, ATH_DBG_FATAL, "Rate table not initialized\n");
-		return;
-	}
-
 	/* Initial rate table size. Will change depending
 	 * on the working rate set */
 	ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
@@ -1239,7 +1151,7 @@ static void ath_rc_init(struct ath_softc *sc,
 
 	ath_rc_priv->rate_table_size = hi + 1;
 	ath_rc_priv->rate_max_phy = 0;
-	ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE);
+	BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
 
 	for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
 		for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1253,16 +1165,17 @@ static void ath_rc_init(struct ath_softc *sc,
 
 		ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
 	}
-	ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE);
-	ASSERT(k <= RATE_TABLE_SIZE);
+	BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+	BUG_ON(k > RATE_TABLE_SIZE);
 
 	ath_rc_priv->max_valid_rate = k;
 	ath_rc_sort_validrates(rate_table, ath_rc_priv);
 	ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
 	sc->cur_rate_table = rate_table;
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "RC Initialized with capabilities: 0x%x\n",
-		ath_rc_priv->ht_cap);
+	ath_print(common, ATH_DBG_CONFIG,
+		  "RC Initialized with capabilities: 0x%x\n",
+		  ath_rc_priv->ht_cap);
 }
 
 static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1296,44 +1209,52 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
 {
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
-	struct ath_tx_info_priv *tx_info_priv = NULL;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr;
-	int final_ts_idx, tx_status = 0, is_underrun = 0;
+	int final_ts_idx = 0, tx_status = 0, is_underrun = 0;
+	int long_retry = 0;
 	__le16 fc;
+	int i;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = hdr->frame_control;
-	tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
-	final_ts_idx = tx_info_priv->tx.ts_rateindex;
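+	/*
+	 * Walk the rate series to find the last one that was actually
+	 * tried: its index becomes final_ts_idx and its try count minus
+	 * one the long retry count.
+	 */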
+	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+		struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
+		if (!rate->count)
+			break;
+
+		final_ts_idx = i;
+		long_retry = rate->count - 1;
+	}
 
 	if (!priv_sta || !ieee80211_is_data(fc) ||
-	    !tx_info_priv->update_rc)
-		goto exit;
+	    !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC))
+		return;
 
-	if (tx_info_priv->tx.ts_status & ATH9K_TXERR_FILT)
-		goto exit;
+	if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
+		return;
 
 	/*
-	 * If underrun error is seen assume it as an excessive retry only
-	 * if prefetch trigger level have reached the max (0x3f for 5416)
-	 * Adjust the long retry as if the frame was tried hw->max_rate_tries
-	 * times. This affects how ratectrl updates PER for the failed rate.
+	 * If an underrun error is seen assume it as an excessive retry only
+	 * if max frame trigger level has been reached (2 KB for single stream,
+	 * and 4 KB for dual stream). Adjust the long retry as if the frame was
+	 * tried hw->max_rate_tries times to affect how ratectrl updates PER for
+	 * the failed rate. In case of congestion on the bus, penalizing this
+	 * type of underrun should help the hardware actually transmit new frames
+	 * successfully by eventually preferring slower rates. This itself
+	 * should also alleviate congestion on the bus.
 	 */
-	if (tx_info_priv->tx.ts_flags &
-	    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
-	    ((sc->sc_ah->tx_trig_level) >= ath_rc_priv->tx_triglevel_max)) {
+	if ((tx_info->pad[0] & ATH_TX_INFO_UNDERRUN) &&
+	    (sc->sc_ah->tx_trig_level >= ath_rc_priv->tx_triglevel_max)) {
 		tx_status = 1;
 		is_underrun = 1;
 	}
 
-	if ((tx_info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) ||
-	    (tx_info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
+	if (tx_info->pad[0] & ATH_TX_INFO_XRETRY)
 		tx_status = 1;
 
 	ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
-			 (is_underrun) ? sc->hw->max_rate_tries :
-			 tx_info_priv->tx.ts_longretry);
+			 (is_underrun) ? sc->hw->max_rate_tries : long_retry);
 
 	/* Check if aggregation has to be enabled for this tid */
 	if (conf_is_ht(&sc->hw->conf) &&
@@ -1347,13 +1268,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
 			an = (struct ath_node *)sta->drv_priv;
 
 			if(ath_tx_aggr_check(sc, an, tid))
-				ieee80211_start_tx_ba_session(sc->hw, hdr->addr1, tid);
+				ieee80211_start_tx_ba_session(sta, tid);
 		}
 	}
 
-	ath_debug_stat_rc(sc, skb);
-exit:
-	kfree(tx_info_priv);
+	ath_debug_stat_rc(sc, ath_rc_get_rateindex(sc->cur_rate_table,
+		&tx_info->status.rates[final_ts_idx]));
 }
 
 static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
@@ -1361,7 +1281,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
 {
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
-	const struct ath_rate_table *rate_table = NULL;
+	const struct ath_rate_table *rate_table;
 	bool is_cw40, is_sgi40;
 	int i, j = 0;
 
@@ -1393,11 +1313,9 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
 	    (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) ||
 	    (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) {
 		rate_table = ath_choose_rate_table(sc, sband->band,
-						   sta->ht_cap.ht_supported,
-						   is_cw40);
-	} else if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
-		/* cur_rate_table would be set on init through config() */
-		rate_table = sc->cur_rate_table;
+		                      sta->ht_cap.ht_supported, is_cw40);
+	} else {
+		rate_table = hw_rate_table[sc->cur_rate_mode];
 	}
 
 	ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40);
@@ -1438,9 +1356,10 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
 						   oper_cw40, oper_sgi40);
 			ath_rc_init(sc, priv_sta, sband, sta, rate_table);
 
-			DPRINTF(sc, ATH_DBG_CONFIG,
-				"Operating HT Bandwidth changed to: %d\n",
-				sc->hw->conf.channel_type);
+			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+				  "Operating HT Bandwidth changed to: %d\n",
+				  sc->hw->conf.channel_type);
+			sc->cur_rate_table = hw_rate_table[sc->cur_rate_mode];
 		}
 	}
 }
@@ -1463,8 +1382,8 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
 
 	rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
 	if (!rate_priv) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to allocate private rc structure\n");
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "Unable to allocate private rc structure\n");
 		return NULL;
 	}
 
@@ -1493,26 +1412,6 @@ static struct rate_control_ops ath_rate_ops = {
 	.free_sta = ath_rate_free_sta,
 };
 
-void ath_rate_attach(struct ath_softc *sc)
-{
-	sc->hw_rate_table[ATH9K_MODE_11A] =
-		&ar5416_11a_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11G] =
-		&ar5416_11g_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11NA_HT20] =
-		&ar5416_11na_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11NG_HT20] =
-		&ar5416_11ng_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
-		&ar5416_11na_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
-		&ar5416_11na_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
-		&ar5416_11ng_ratetable;
-	sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
-		&ar5416_11ng_ratetable;
-}
-
 int ath_rate_control_register(void)
 {
 	return ieee80211_rate_control_register(&ath_rate_ops);
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index fa21a628ddd0..9eb96f506998 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -19,6 +19,8 @@
 #ifndef RC_H
 #define RC_H
 
+#include "hw.h"
+
 struct ath_softc;
 
 #define ATH_RATE_MAX     30
@@ -102,6 +104,7 @@ enum {
  */
 struct ath_rate_table {
 	int rate_cnt;
+	int mcs_start;
 	struct {
 		int valid;
 		int valid_single_stream;
@@ -109,14 +112,12 @@ struct ath_rate_table {
 		u32 ratekbps;
 		u32 user_ratekbps;
 		u8 ratecode;
-		u8 short_preamble;
 		u8 dot11rate;
 		u8 ctrl_rate;
 		u8 base_index;
 		u8 cw40index;
 		u8 sgi_index;
 		u8 ht_index;
-		u32 max_4ms_framelen;
 	} info[RATE_TABLE_SIZE];
 	u32 probe_interval;
 	u8 initial_ratemax;
@@ -165,26 +166,18 @@ struct ath_rate_priv {
 	struct ath_rate_softc *asc;
 };
 
+#define ATH_TX_INFO_FRAME_TYPE_INTERNAL	(1 << 0)
+#define ATH_TX_INFO_FRAME_TYPE_PAUSE	(1 << 1)
+#define ATH_TX_INFO_UPDATE_RC		(1 << 2)
+#define ATH_TX_INFO_XRETRY		(1 << 3)
+#define ATH_TX_INFO_UNDERRUN		(1 << 4)
+
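
These flag bits replace the old kmalloc'd ath_tx_info_priv: the driver's per-frame TX state is packed into a padding byte of the mac80211 tx info (tx_info->pad[0] elsewhere in this patch) instead of a separately allocated structure. A minimal sketch of the same set-and-test pattern on a plain byte (names shortened; only the bit manipulation is the point):

#include <stdio.h>

#define TX_INFO_FRAME_TYPE_INTERNAL	(1 << 0)
#define TX_INFO_FRAME_TYPE_PAUSE	(1 << 1)
#define TX_INFO_UPDATE_RC		(1 << 2)
#define TX_INFO_XRETRY			(1 << 3)
#define TX_INFO_UNDERRUN		(1 << 4)

int main(void)
{
	unsigned char pad = 0;

	/* producer side: an internal pause frame whose status rc wants */
	pad |= TX_INFO_FRAME_TYPE_PAUSE | TX_INFO_FRAME_TYPE_INTERNAL;
	pad |= TX_INFO_UPDATE_RC;

	/* consumer side: test individual bits */
	if (pad & TX_INFO_UPDATE_RC)
		printf("rate control wants this tx status\n");
	if (!(pad & (TX_INFO_XRETRY | TX_INFO_UNDERRUN)))
		printf("no retry/underrun error recorded\n");
	return 0;
}
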
 enum ath9k_internal_frame_type {
 	ATH9K_NOT_INTERNAL,
 	ATH9K_INT_PAUSE,
 	ATH9K_INT_UNPAUSE
 };
 
-struct ath_tx_info_priv {
-	struct ath_wiphy *aphy;
-	struct ath_tx_status tx;
-	int n_frames;
-	int n_bad_frames;
-	bool update_rc;
-	enum ath9k_internal_frame_type frame_type;
-};
-
-#define ATH_TX_INFO_PRIV(tx_info) \
-	((struct ath_tx_info_priv *)((tx_info)->rate_driver_data[0]))
-
-void ath_rate_attach(struct ath_softc *sc);
-u8 ath_rate_findrateix(struct ath_softc *sc, u8 dot11_rate);
 int ath_rate_control_register(void);
 void ath_rate_control_unregister(void);
 
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ec0abf823995..477365e5ae69 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -48,6 +48,7 @@ static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
 	struct sk_buff *skb;
 
@@ -59,14 +60,16 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
 
 	/* virtual addr of the beginning of the buffer. */
 	skb = bf->bf_mpdu;
-	ASSERT(skb != NULL);
+	BUG_ON(skb == NULL);
 	ds->ds_vdata = skb->data;
 
-	/* setup rx descriptors. The rx.bufsize here tells the harware
+	/*
+	 * setup rx descriptors. The rx_bufsize here tells the hardware
 	 * how much data it can DMA to us and that we are prepared
-	 * to process */
+	 * to process
+	 */
 	ath9k_hw_setuprxdesc(ah, ds,
-			     sc->rx.bufsize,
+			     common->rx_bufsize,
 			     0);
 
 	if (sc->rx.rxlink == NULL)
@@ -86,192 +89,11 @@ static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
 	sc->rx.rxotherant = 0;
 }
 
-/*
- *  Extend 15-bit time stamp from rx descriptor to
- *  a full 64-bit TSF using the current h/w TSF.
-*/
-static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
-{
-	u64 tsf;
-
-	tsf = ath9k_hw_gettsf64(sc->sc_ah);
-	if ((tsf & 0x7fff) < rstamp)
-		tsf -= 0x8000;
-	return (tsf & ~0x7fff) | rstamp;
-}
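
The removed ath_extend_tsf() reconstructs a 64-bit timestamp from the 15-bit stamp carried in the rx descriptor: read the current hardware TSF, and if its low 15 bits are already smaller than the stamp, the counter wrapped after the frame arrived, so step back one 0x8000 period before splicing the stamp into the low bits. The same arithmetic as a standalone program (plain stdint types instead of the driver's):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Extend a 15-bit rx timestamp to 64 bits against the current TSF. */
static uint64_t extend_tsf(uint64_t tsf_now, uint32_t rstamp)
{
	uint64_t tsf = tsf_now;

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;		/* low bits wrapped since reception */
	return (tsf & ~0x7fffULL) | rstamp;
}

int main(void)
{
	/* frame stamped at 0x7ff0, TSF read shortly after a wrap */
	uint64_t now = 0x123456788002ULL;

	printf("%" PRIx64 "\n", extend_tsf(now, 0x7ff0)); /* 123456787ff0 */
	return 0;
}
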
-
-/*
- * For Decrypt or Demic errors, we only mark packet status here and always push
- * up the frame up to let mac80211 handle the actual error case, be it no
- * decryption key or real decryption error. This let us keep statistics there.
- */
-static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
-			  struct ieee80211_rx_status *rx_status, bool *decrypt_error,
-			  struct ath_softc *sc)
-{
-	struct ieee80211_hdr *hdr;
-	u8 ratecode;
-	__le16 fc;
-	struct ieee80211_hw *hw;
-	struct ieee80211_sta *sta;
-	struct ath_node *an;
-	int last_rssi = ATH_RSSI_DUMMY_MARKER;
-
-
-	hdr = (struct ieee80211_hdr *)skb->data;
-	fc = hdr->frame_control;
-	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
-	hw = ath_get_virt_hw(sc, hdr);
-
-	if (ds->ds_rxstat.rs_more) {
-		/*
-		 * Frame spans multiple descriptors; this cannot happen yet
-		 * as we don't support jumbograms. If not in monitor mode,
-		 * discard the frame. Enable this if you want to see
-		 * error frames in Monitor mode.
-		 */
-		if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
-			goto rx_next;
-	} else if (ds->ds_rxstat.rs_status != 0) {
-		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
-			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
-			goto rx_next;
-
-		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
-			*decrypt_error = true;
-		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
-			if (ieee80211_is_ctl(fc))
-				/*
-				 * Sometimes, we get invalid
-				 * MIC failures on valid control frames.
-				 * Remove these mic errors.
-				 */
-				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
-			else
-				rx_status->flag |= RX_FLAG_MMIC_ERROR;
-		}
-		/*
-		 * Reject error frames with the exception of
-		 * decryption and MIC failures. For monitor mode,
-		 * we also ignore the CRC error.
-		 */
-		if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
-			if (ds->ds_rxstat.rs_status &
-			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
-			      ATH9K_RXERR_CRC))
-				goto rx_next;
-		} else {
-			if (ds->ds_rxstat.rs_status &
-			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
-				goto rx_next;
-			}
-		}
-	}
-
-	ratecode = ds->ds_rxstat.rs_rate;
-
-	if (ratecode & 0x80) {
-		/* HT rate */
-		rx_status->flag |= RX_FLAG_HT;
-		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
-			rx_status->flag |= RX_FLAG_40MHZ;
-		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
-			rx_status->flag |= RX_FLAG_SHORT_GI;
-		rx_status->rate_idx = ratecode & 0x7f;
-	} else {
-		int i = 0, cur_band, n_rates;
-
-		cur_band = hw->conf.channel->band;
-		n_rates = sc->sbands[cur_band].n_bitrates;
-
-		for (i = 0; i < n_rates; i++) {
-			if (sc->sbands[cur_band].bitrates[i].hw_value ==
-			    ratecode) {
-				rx_status->rate_idx = i;
-				break;
-			}
-
-			if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
-			    ratecode) {
-				rx_status->rate_idx = i;
-				rx_status->flag |= RX_FLAG_SHORTPRE;
-				break;
-			}
-		}
-	}
-
-	rcu_read_lock();
-	sta = ieee80211_find_sta(sc->hw, hdr->addr2);
-	if (sta) {
-		an = (struct ath_node *) sta->drv_priv;
-		if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD &&
-		   !ds->ds_rxstat.rs_moreaggr)
-			ATH_RSSI_LPF(an->last_rssi, ds->ds_rxstat.rs_rssi);
-		last_rssi = an->last_rssi;
-	}
-	rcu_read_unlock();
-
-	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-		ds->ds_rxstat.rs_rssi = ATH_EP_RND(last_rssi,
-					ATH_RSSI_EP_MULTIPLIER);
-	if (ds->ds_rxstat.rs_rssi < 0)
-		ds->ds_rxstat.rs_rssi = 0;
-	else if (ds->ds_rxstat.rs_rssi > 127)
-		ds->ds_rxstat.rs_rssi = 127;
-
-	/* Update Beacon RSSI, this is used by ANI. */
-	if (ieee80211_is_beacon(fc))
-		sc->sc_ah->stats.avgbrssi = ds->ds_rxstat.rs_rssi;
-
-	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
-	rx_status->band = hw->conf.channel->band;
-	rx_status->freq = hw->conf.channel->center_freq;
-	rx_status->noise = sc->ani.noise_floor;
-	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + ds->ds_rxstat.rs_rssi;
-	rx_status->antenna = ds->ds_rxstat.rs_antenna;
-
-	/*
-	 * Theory for reporting quality:
-	 *
-	 * At a hardware RSSI of 45 you will be able to use MCS 7  reliably.
-	 * At a hardware RSSI of 45 you will be able to use MCS 15 reliably.
-	 * At a hardware RSSI of 35 you should be able use 54 Mbps reliably.
-	 *
-	 * MCS 7  is the highets MCS index usable by a 1-stream device.
-	 * MCS 15 is the highest MCS index usable by a 2-stream device.
-	 *
-	 * All ath9k devices are either 1-stream or 2-stream.
-	 *
-	 * How many bars you see is derived from the qual reporting.
-	 *
-	 * A more elaborate scheme can be used here but it requires tables
-	 * of SNR/throughput for each possible mode used. For the MCS table
-	 * you can refer to the wireless wiki:
-	 *
-	 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
-	 *
-	 */
-	if (conf_is_ht(&hw->conf))
-		rx_status->qual =  ds->ds_rxstat.rs_rssi * 100 / 45;
-	else
-		rx_status->qual =  ds->ds_rxstat.rs_rssi * 100 / 35;
-
-	/* rssi can be more than 45 though, anything above that
-	 * should be considered at 100% */
-	if (rx_status->qual > 100)
-		rx_status->qual = 100;
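
The removed heuristic above maps hardware RSSI onto the 0-100 qual scale by scaling linearly against the level considered reliable for the top rate (45 for HT, 35 for legacy) and clamping at 100. The mapping in isolation:

#include <stdio.h>

/* RSSI-to-quality mapping described in the removed comment above. */
static int rssi_to_qual(int rssi, int is_ht)
{
	int qual = rssi * 100 / (is_ht ? 45 : 35);

	return qual > 100 ? 100 : qual;
}

int main(void)
{
	printf("HT     rssi 45 -> %d%%\n", rssi_to_qual(45, 1));	/* 100 */
	printf("HT     rssi 30 -> %d%%\n", rssi_to_qual(30, 1));	/*  66 */
	printf("legacy rssi 50 -> %d%%\n", rssi_to_qual(50, 0));	/* clamped */
	return 0;
}
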
-
-	rx_status->flag |= RX_FLAG_TSFT;
-
-	return 1;
-rx_next:
-	return 0;
-}
-
 static void ath_opmode_init(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
 	u32 rfilt, mfilt[2];
 
 	/* configure rx filter */
@@ -280,13 +102,13 @@ static void ath_opmode_init(struct ath_softc *sc)
 
 	/* configure bssid mask */
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
-		ath9k_hw_setbssidmask(sc);
+		ath_hw_setbssidmask(common);
 
 	/* configure operational mode */
 	ath9k_hw_setopmode(ah);
 
 	/* Handle any link-level address change. */
-	ath9k_hw_setmac(ah, sc->sc_ah->macaddr);
+	ath9k_hw_setmac(ah, common->macaddr);
 
 	/* calculate and install multicast filter */
 	mfilt[0] = mfilt[1] = ~0;
@@ -295,6 +117,7 @@ static void ath_opmode_init(struct ath_softc *sc)
 
 int ath_rx_init(struct ath_softc *sc, int nbufs)
 {
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 	int error = 0;
@@ -303,24 +126,24 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_lock_init(&sc->rx.rxbuflock);
 
-	sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				 min(sc->common.cachelsz, (u16)64));
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+				     min(common->cachelsz, (u16)64));
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		sc->common.cachelsz, sc->rx.bufsize);
+	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+		  common->cachelsz, common->rx_bufsize);
 
 	/* Initialize rx descriptors */
 
 	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
 				  "rx", nbufs, 1);
 	if (error != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"failed to allocate rx descriptors: %d\n", error);
+		ath_print(common, ATH_DBG_FATAL,
+			  "failed to allocate rx descriptors: %d\n", error);
 		goto err;
 	}
 
 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_KERNEL);
+		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
 		if (skb == NULL) {
 			error = -ENOMEM;
 			goto err;
@@ -328,14 +151,14 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 
 		bf->bf_mpdu = skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
-						 sc->rx.bufsize,
+						 common->rx_bufsize,
 						 DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"dma_mapping_error() on RX init\n");
+			ath_print(common, ATH_DBG_FATAL,
+				  "dma_mapping_error() on RX init\n");
 			error = -ENOMEM;
 			goto err;
 		}
@@ -352,6 +175,8 @@ err:
 
 void ath_rx_cleanup(struct ath_softc *sc)
 {
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 
@@ -359,7 +184,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
 		skb = bf->bf_mpdu;
 		if (skb) {
 			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					 sc->rx.bufsize, DMA_FROM_DEVICE);
+					 common->rx_bufsize, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 		}
 	}
@@ -420,7 +245,10 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 	else
 		rfilt |= ATH9K_RX_FILTER_BEACON;
 
-	if (sc->rx.rxfilter & FIF_PSPOLL)
+	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
+	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
+	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
+	    (sc->rx.rxfilter & FIF_PSPOLL))
 		rfilt |= ATH9K_RX_FILTER_PSPOLL;
 
 	if (conf_is_ht(&sc->hw->conf))
@@ -527,20 +355,22 @@ static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
 static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
 {
 	struct ieee80211_mgmt *mgmt;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
 	if (skb->len < 24 + 8 + 2 + 2)
 		return;
 
 	mgmt = (struct ieee80211_mgmt *)skb->data;
-	if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
 		return; /* not from our current AP */
 
 	sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
 
 	if (sc->sc_flags & SC_OP_BEACON_SYNC) {
 		sc->sc_flags &= ~SC_OP_BEACON_SYNC;
-		DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on "
-			"timestamp from the AP\n");
+		ath_print(common, ATH_DBG_PS,
+			  "Reconfigure Beacon timers based on "
+			  "timestamp from the AP\n");
 		ath_beacon_config(sc, NULL);
 	}
 
@@ -552,8 +382,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
 		 * a backup trigger for returning into NETWORK SLEEP state,
 		 * so we are waiting for it as well.
 		 */
-		DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
-			"buffered broadcast/multicast frame(s)\n");
+		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
+			  "buffered broadcast/multicast frame(s)\n");
 		sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
 		return;
 	}
@@ -565,13 +395,15 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
 		 * been delivered.
 		 */
 		sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
-		DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n");
+		ath_print(common, ATH_DBG_PS,
+			  "PS wait for CAB frames timed out\n");
 	}
 }
 
 static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -589,14 +421,15 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
 		 * point.
 		 */
 		sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
-		DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to "
-			"sleep\n");
+		ath_print(common, ATH_DBG_PS,
+			  "All PS CAB frames received, back to sleep\n");
 	} else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
 		   !is_multicast_ether_addr(hdr->addr1) &&
 		   !ieee80211_has_morefrags(hdr->frame_control)) {
 		sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
-		DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having "
-			"received PS-Poll data (0x%x)\n",
+		ath_print(common, ATH_DBG_PS,
+			  "Going back to sleep after having received "
+			  "PS-Poll data (0x%x)\n",
 			sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
 					SC_OP_WAIT_FOR_CAB |
 					SC_OP_WAIT_FOR_PSPOLL_DATA |
@@ -604,8 +437,9 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
 	}
 }
 
-static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
-				    struct ieee80211_rx_status *rx_status)
+static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
+				    struct ath_softc *sc, struct sk_buff *skb,
+				    struct ieee80211_rx_status *rxs)
 {
 	struct ieee80211_hdr *hdr;
 
@@ -625,19 +459,14 @@ static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
 			if (aphy == NULL)
 				continue;
 			nskb = skb_copy(skb, GFP_ATOMIC);
-			if (nskb) {
-				memcpy(IEEE80211_SKB_RXCB(nskb), rx_status,
-					sizeof(*rx_status));
-				ieee80211_rx(aphy->hw, nskb);
-			}
+			if (!nskb)
+				continue;
+			ieee80211_rx(aphy->hw, nskb);
 		}
-		memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
 		ieee80211_rx(sc->hw, skb);
-	} else {
+	} else
 		/* Deliver unicast frames based on receiver address */
-		memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
-		ieee80211_rx(ath_get_virt_hw(sc, hdr), skb);
-	}
+		ieee80211_rx(hw, skb);
 }
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush)
@@ -648,14 +477,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 
 	struct ath_buf *bf;
 	struct ath_desc *ds;
+	struct ath_rx_status *rx_stats;
 	struct sk_buff *skb = NULL, *requeue_skb;
-	struct ieee80211_rx_status rx_status;
+	struct ieee80211_rx_status *rxs;
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	/*
+	 * The hw can technically differ from common->hw when using ath9k
+	 * virtual wiphy; to account for that we iterate over the active
+	 * wiphys and find the appropriate wiphy and therefore hw.
+	 */
+	struct ieee80211_hw *hw = NULL;
 	struct ieee80211_hdr *hdr;
-	int hdrlen, padsize, retval;
+	int retval;
 	bool decrypt_error = false;
-	u8 keyix;
-	__le16 fc;
 
 	spin_lock_bh(&sc->rx.rxbuflock);
 
@@ -727,9 +562,15 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		 * 2. requeueing the same buffer to h/w
 		 */
 		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-				sc->rx.bufsize,
+				common->rx_bufsize,
 				DMA_FROM_DEVICE);
 
+		hdr = (struct ieee80211_hdr *) skb->data;
+		rxs =  IEEE80211_SKB_RXCB(skb);
+
+		hw = ath_get_virt_hw(sc, hdr);
+		rx_stats = &ds->ds_rxstat;
+
 		/*
 		 * If we're asked to flush receive queue, directly
 		 * chain it back at the queue without processing it.
@@ -737,19 +578,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		if (flush)
 			goto requeue;
 
-		if (!ds->ds_rxstat.rs_datalen)
-			goto requeue;
-
-		/* The status portion of the descriptor could get corrupted. */
-		if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
-			goto requeue;
-
-		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
+		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
+						     rxs, &decrypt_error);
+		if (retval)
 			goto requeue;
 
 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
-		requeue_skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_ATOMIC);
+		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
 
 		/* If there is no memory we ignore the current RX'd frame,
 		 * tell hardware it can give us a new frame using the old
@@ -760,60 +596,26 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
-				 sc->rx.bufsize,
+				 common->rx_bufsize,
 				 DMA_FROM_DEVICE);
 
-		skb_put(skb, ds->ds_rxstat.rs_datalen);
-
-		/* see if any padding is done by the hw and remove it */
-		hdr = (struct ieee80211_hdr *)skb->data;
-		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-		fc = hdr->frame_control;
-
-		/* The MAC header is padded to have 32-bit boundary if the
-		 * packet payload is non-zero. The general calculation for
-		 * padsize would take into account odd header lengths:
-		 * padsize = (4 - hdrlen % 4) % 4; However, since only
-		 * even-length headers are used, padding can only be 0 or 2
-		 * bytes and we can optimize this a bit. In addition, we must
-		 * not try to remove padding from short control frames that do
-		 * not have payload. */
-		padsize = hdrlen & 3;
-		if (padsize && hdrlen >= 24) {
-			memmove(skb->data + padsize, skb->data, hdrlen);
-			skb_pull(skb, padsize);
-		}
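
The removed block above spells out the rx padding rule: the MAC header is padded to a 32-bit boundary whenever there is a payload, and since real 802.11 header lengths are even, the pad is only ever 0 or 2 bytes, which is why hdrlen & 3 can stand in for the general (4 - hdrlen % 4) % 4. A quick check of both forms (they agree exactly for the even header lengths that occur in practice):

#include <stdio.h>

static int pad_general(int hdrlen)  { return (4 - hdrlen % 4) % 4; }
static int pad_shortcut(int hdrlen) { return hdrlen & 3; }

int main(void)
{
	/* 24: basic data, 26: +QoS, 30: 4-address, 32: 4-address +QoS */
	int hdrlens[] = { 24, 26, 30, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++)
		printf("hdrlen %2d -> pad %d (general form %d)\n",
		       hdrlens[i], pad_shortcut(hdrlens[i]),
		       pad_general(hdrlens[i]));
	return 0;
}
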
-
-		keyix = ds->ds_rxstat.rs_keyix;
+		skb_put(skb, rx_stats->rs_datalen);
 
-		if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
-			rx_status.flag |= RX_FLAG_DECRYPTED;
-		} else if (ieee80211_has_protected(fc)
-			   && !decrypt_error && skb->len >= hdrlen + 4) {
-			keyix = skb->data[hdrlen + 3] >> 6;
-
-			if (test_bit(keyix, sc->keymap))
-				rx_status.flag |= RX_FLAG_DECRYPTED;
-		}
-		if (ah->sw_mgmt_crypto &&
-		    (rx_status.flag & RX_FLAG_DECRYPTED) &&
-		    ieee80211_is_mgmt(fc)) {
-			/* Use software decrypt for management frames. */
-			rx_status.flag &= ~RX_FLAG_DECRYPTED;
-		}
+		ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
+					     rxs, decrypt_error);
 
 		/* We will now give hardware our shiny new allocated skb */
 		bf->bf_mpdu = requeue_skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
-					 sc->rx.bufsize,
-					 DMA_FROM_DEVICE);
+						 common->rx_bufsize,
+						 DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(sc->dev,
 			  bf->bf_buf_addr))) {
 			dev_kfree_skb_any(requeue_skb);
 			bf->bf_mpdu = NULL;
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"dma_mapping_error() on RX\n");
-			ath_rx_send_to_mac80211(sc, skb, &rx_status);
+			ath_print(common, ATH_DBG_FATAL,
+				  "dma_mapping_error() on RX\n");
+			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 			break;
 		}
 		bf->bf_dmacontext = bf->bf_buf_addr;
@@ -824,7 +626,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		 */
 		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
 			if (++sc->rx.rxotherant >= 3)
-				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
+				ath_setdefantenna(sc, rx_stats->rs_antenna);
 		} else {
 			sc->rx.rxotherant = 0;
 		}
@@ -834,7 +636,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 					     SC_OP_WAIT_FOR_PSPOLL_DATA)))
 			ath_rx_ps(sc, skb);
 
-		ath_rx_send_to_mac80211(sc, skb, &rx_status);
+		ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 
 requeue:
 		list_move_tail(&bf->list, &sc->rx.rxbuf);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d83b77f821e9..8e653fb937a1 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -17,6 +17,8 @@
 #ifndef REG_H
 #define REG_H
 
+#include "../reg.h"
+
 #define AR_CR                0x0008
 #define AR_CR_RXE            0x00000004
 #define AR_CR_RXD            0x00000020
@@ -969,10 +971,10 @@ enum {
 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S         4
 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF        0x00000080
 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S      7
+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB      0x00000400
+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S    10
 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB        0x00001000
 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S      12
-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB      0x00001000
-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S    1
 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB         0x00008000
 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S       15
 #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE        0x00010000
@@ -1330,13 +1332,22 @@ enum {
 #define AR_MCAST_FIL0       0x8040
 #define AR_MCAST_FIL1       0x8044
 
+/*
+ * AR_DIAG_SW - Register which can be used for diagnostics and testing purposes.
+ *
+ * The force RX abort (AR_DIAG_RX_ABORT, bit 25) can be used in conjunction with
+ * RX block (AR_DIAG_RX_DIS, bit 5) to quickly shut down receive for a fast
+ * channel change. The force RX abort bit will kill any frame currently being
+ * transferred between the MAC and baseband. The RX block bit (AR_DIAG_RX_DIS)
+ * will prevent any new frames from getting started.
+ */
 #define AR_DIAG_SW                  0x8048
 #define AR_DIAG_CACHE_ACK           0x00000001
 #define AR_DIAG_ACK_DIS             0x00000002
 #define AR_DIAG_CTS_DIS             0x00000004
 #define AR_DIAG_ENCRYPT_DIS         0x00000008
 #define AR_DIAG_DECRYPT_DIS         0x00000010
-#define AR_DIAG_RX_DIS              0x00000020
+#define AR_DIAG_RX_DIS              0x00000020 /* RX block */
 #define AR_DIAG_LOOP_BACK           0x00000040
 #define AR_DIAG_CORR_FCS            0x00000080
 #define AR_DIAG_CHAN_INFO           0x00000100
@@ -1345,12 +1356,12 @@ enum {
 #define AR_DIAG_FRAME_NV0           0x00020000
 #define AR_DIAG_OBS_PT_SEL1         0x000C0000
 #define AR_DIAG_OBS_PT_SEL1_S       18
-#define AR_DIAG_FORCE_RX_CLEAR      0x00100000
+#define AR_DIAG_FORCE_RX_CLEAR      0x00100000 /* force rx_clear high */
 #define AR_DIAG_IGNORE_VIRT_CS      0x00200000
 #define AR_DIAG_FORCE_CH_IDLE_HIGH  0x00400000
 #define AR_DIAG_EIFS_CTRL_ENA       0x00800000
 #define AR_DIAG_DUAL_CHAIN_INFO     0x01000000
-#define AR_DIAG_RX_ABORT            0x02000000
+#define AR_DIAG_RX_ABORT            0x02000000 /* Force RX abort */
 #define AR_DIAG_SATURATE_CYCLE_CNT  0x04000000
 #define AR_DIAG_OBS_PT_SEL2         0x08000000
 #define AR_DIAG_RX_CLEAR_CTL_LOW    0x10000000
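
The new comment on AR_DIAG_SW further up describes the fast rx shutdown sequence: set the force-abort bit to kill whatever is moving between MAC and baseband, and the rx-block bit so nothing new starts. A rough user-space sketch of that read-modify-write, with a fake register backing store standing in for the driver's MMIO accessors (the reg_read/reg_write helpers here are assumptions for the sketch, not the ath9k register API, and any wait-for-idle policy is left out):

#include <stdint.h>
#include <stdio.h>

#define AR_DIAG_SW		0x8048
#define AR_DIAG_RX_DIS		0x00000020	/* RX block */
#define AR_DIAG_RX_ABORT	0x02000000	/* force RX abort */

/* fake backing store so the sketch runs; a driver would do MMIO here */
static uint32_t fake_diag_sw;
static uint32_t reg_read(uint32_t reg)  { (void)reg; return fake_diag_sw; }
static void reg_write(uint32_t reg, uint32_t val) { (void)reg; fake_diag_sw = val; }

/* Shut down receive quickly, e.g. ahead of a fast channel change. */
static void rx_fast_shutdown(void)
{
	reg_write(AR_DIAG_SW,
		  reg_read(AR_DIAG_SW) | AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
}

int main(void)
{
	rx_fast_shutdown();
	printf("AR_DIAG_SW = 0x%08x\n", (unsigned int)fake_diag_sw); /* 0x02000020 */
	return 0;
}
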
@@ -1421,9 +1432,6 @@ enum {
 #define AR_SLEEP2_BEACON_TIMEOUT    0xFFE00000
 #define AR_SLEEP2_BEACON_TIMEOUT_S  21
 
-#define AR_BSSMSKL            0x80e0
-#define AR_BSSMSKU            0x80e4
-
 #define AR_TPC                 0x80e8
 #define AR_TPC_ACK             0x0000003f
 #define AR_TPC_ACK_S           0x00
@@ -1705,4 +1713,7 @@ enum {
 #define AR_KEYTABLE_MAC0(_n)    (AR_KEYTABLE(_n) + 24)
 #define AR_KEYTABLE_MAC1(_n)    (AR_KEYTABLE(_n) + 28)
 
+#define AR9271_CORE_CLOCK	117   /* clock to 117 MHz */
+#define AR9271_TARGET_BAUD_RATE	19200 /* 115200 */
+
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 19b88f8177fd..cd26caaf44e7 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -40,6 +40,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath9k_vif_iter_data iter_data;
 	int i, j;
 	u8 mask[ETH_ALEN];
@@ -51,7 +52,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
 	 */
 	iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
 	if (iter_data.addr) {
-		memcpy(iter_data.addr, sc->sc_ah->macaddr, ETH_ALEN);
+		memcpy(iter_data.addr, common->macaddr, ETH_ALEN);
 		iter_data.count = 1;
 	} else
 		iter_data.count = 0;
@@ -86,20 +87,21 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
 	kfree(iter_data.addr);
 
 	/* Invert the mask and configure hardware */
-	sc->bssidmask[0] = ~mask[0];
-	sc->bssidmask[1] = ~mask[1];
-	sc->bssidmask[2] = ~mask[2];
-	sc->bssidmask[3] = ~mask[3];
-	sc->bssidmask[4] = ~mask[4];
-	sc->bssidmask[5] = ~mask[5];
-
-	ath9k_hw_setbssidmask(sc);
+	common->bssidmask[0] = ~mask[0];
+	common->bssidmask[1] = ~mask[1];
+	common->bssidmask[2] = ~mask[2];
+	common->bssidmask[3] = ~mask[3];
+	common->bssidmask[4] = ~mask[4];
+	common->bssidmask[5] = ~mask[5];
+
+	ath_hw_setbssidmask(common);
 }
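
The inversion at the end of ath9k_set_bssid_mask() is the last step of the BSSID mask computation: the iteration (outside this hunk) is expected to OR together the XOR of every active address against the reference address, so mask holds a 1 wherever the addresses disagree, while the hardware wants the opposite sense, 1 meaning "this bit must match". A standalone sketch under that reading:

#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	/* reference address plus one virtual wiphy address (2 bits differ) */
	unsigned char addrs[2][ETH_ALEN] = {
		{ 0x00, 0x03, 0x7f, 0x10, 0x20, 0x30 },
		{ 0x02, 0x03, 0x7f, 0x10, 0x20, 0x31 },
	};
	unsigned char mask[ETH_ALEN] = { 0 };
	unsigned char bssidmask[ETH_ALEN];
	int i, j;

	/* collect every bit position where an address differs from addrs[0] */
	for (i = 1; i < 2; i++)
		for (j = 0; j < ETH_ALEN; j++)
			mask[j] |= addrs[i][j] ^ addrs[0][j];

	/* invert: a 1 now means the hardware must match that bit */
	for (j = 0; j < ETH_ALEN; j++)
		bssidmask[j] = ~mask[j];

	for (j = 0; j < ETH_ALEN; j++)
		printf("%02x%c", bssidmask[j], j == ETH_ALEN - 1 ? '\n' : ':');
	return 0;	/* prints fd:ff:ff:ff:ff:fe */
}
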
 
 int ath9k_wiphy_add(struct ath_softc *sc)
 {
 	int i, error;
 	struct ath_wiphy *aphy;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_hw *hw;
 	u8 addr[ETH_ALEN];
 
@@ -138,7 +140,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
 	sc->sec_wiphy[i] = aphy;
 	spin_unlock_bh(&sc->wiphy_lock);
 
-	memcpy(addr, sc->sc_ah->macaddr, ETH_ALEN);
+	memcpy(addr, common->macaddr, ETH_ALEN);
 	addr[0] |= 0x02; /* Locally managed address */
 	/*
 	 * XOR virtual wiphy index into the least significant bits to generate
@@ -296,6 +298,7 @@ static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
 void ath9k_wiphy_chan_work(struct work_struct *work)
 {
 	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_wiphy *aphy = sc->next_wiphy;
 
 	if (aphy == NULL)
@@ -311,6 +314,10 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
 	/* XXX: remove me eventually */
 	ath9k_update_ichannel(sc, aphy->hw,
 			      &sc->sc_ah->channels[sc->chan_idx]);
+
+	/* sync hw configuration for hw code */
+	common->hw = aphy->hw;
+
 	ath_update_chainmask(sc, sc->chan_is_ht);
 	if (ath_set_channel(sc, aphy->hw,
 			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
@@ -331,13 +338,11 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
 void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ath_wiphy *aphy = hw->priv;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
 
-	if (tx_info_priv && tx_info_priv->frame_type == ATH9K_INT_PAUSE &&
+	if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
 	    aphy->state == ATH_WIPHY_PAUSING) {
-		if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
+		if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
 			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
 			       "frame\n", wiphy_name(hw->wiphy));
 			/*
@@ -356,9 +361,6 @@ void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 		}
 	}
 
-	kfree(tx_info_priv);
-	tx_info->rate_driver_data[0] = NULL;
-
 	dev_kfree_skb(skb);
 }
 
@@ -519,8 +521,9 @@ int ath9k_wiphy_select(struct ath_wiphy *aphy)
 			 * frame being completed)
 			 */
 			spin_unlock_bh(&sc->wiphy_lock);
-			ath_radio_disable(sc);
-			ath_radio_enable(sc);
+			ath_radio_disable(sc, aphy->hw);
+			ath_radio_enable(sc, aphy->hw);
+			/* Only the primary wiphy hw is used for queuing work */
 			ieee80211_queue_work(aphy->sc->hw,
 				   &aphy->sc->chan_work);
 			return -EBUSY; /* previous select still in progress */
@@ -666,15 +669,78 @@ void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
 bool ath9k_all_wiphys_idle(struct ath_softc *sc)
 {
 	unsigned int i;
-	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
+	if (!sc->pri_wiphy->idle)
 		return false;
-	}
 	for (i = 0; i < sc->num_sec_wiphy; i++) {
 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
 		if (!aphy)
 			continue;
-		if (aphy->state != ATH_WIPHY_INACTIVE)
+		if (!aphy->idle)
 			return false;
 	}
 	return true;
 }
+
+/* caller must hold wiphy_lock */
+void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
+{
+	struct ath_softc *sc = aphy->sc;
+
+	aphy->idle = idle;
+	ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+		  "Marking %s as %s\n",
+		  wiphy_name(aphy->hw->wiphy),
+		  idle ? "idle" : "not-idle");
+}
+/* Only bother starting a queue on an active virtual wiphy */
+void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
+{
+	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
+	unsigned int i;
+
+	spin_lock_bh(&sc->wiphy_lock);
+
+	/* Start the primary wiphy */
+	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
+		ieee80211_wake_queue(hw, skb_queue);
+		goto unlock;
+	}
+
+	/* Now start the secondary wiphy queues */
+	for (i = 0; i < sc->num_sec_wiphy; i++) {
+		struct ath_wiphy *aphy = sc->sec_wiphy[i];
+		if (!aphy)
+			continue;
+		if (aphy->state != ATH_WIPHY_ACTIVE)
+			continue;
+
+		hw = aphy->hw;
+		ieee80211_wake_queue(hw, skb_queue);
+		break;
+	}
+
+unlock:
+	spin_unlock_bh(&sc->wiphy_lock);
+}
+
+/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
+void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
+{
+	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
+	unsigned int i;
+
+	spin_lock_bh(&sc->wiphy_lock);
+
+	/* Stop the primary wiphy */
+	ieee80211_stop_queue(hw, skb_queue);
+
+	/* Now stop the secondary wiphy queues */
+	for (i = 0; i < sc->num_sec_wiphy; i++) {
+		struct ath_wiphy *aphy = sc->sec_wiphy[i];
+		if (!aphy)
+			continue;
+		hw = aphy->hw;
+		ieee80211_stop_queue(hw, skb_queue);
+	}
+	spin_unlock_bh(&sc->wiphy_lock);
+}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 42551a48c8ac..564c6cb1c2b4 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -70,6 +70,29 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
 static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
 			     int nbad, int txok, bool update_rc);
 
+enum {
+	MCS_DEFAULT,
+	MCS_HT40,
+	MCS_HT40_SGI,
+};
+
+static int ath_max_4ms_framelen[3][16] = {
+	[MCS_DEFAULT] = {
+		3216,  6434,  9650,  12868, 19304, 25740,  28956,  32180,
+		6430,  12860, 19300, 25736, 38600, 51472,  57890,  64320,
+	},
+	[MCS_HT40] = {
+		6684,  13368, 20052, 26738, 40104, 53476,  60156,  66840,
+		13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
+	},
+	[MCS_HT40_SGI] = {
+		/* TODO: Only MCS 7 and 15 updated, recalculate the rest */
+		6684,  13368, 20052, 26738, 40104, 53476,  60156,  74200,
+		13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
+	}
+};
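
These tables cap how many bytes an aggregate may carry so that it stays within roughly 4 ms of airtime at the chosen MCS; each entry is close to PHY rate x 4 ms, i.e. about rate_in_kbps / 2 bytes, with the driver's figures rounded down a little. A quick check of that relationship for the first (single-stream) row of MCS_DEFAULT (the kbps values are the standard single-stream HT20 long-GI rates, listed here only for illustration):

#include <stdio.h>

int main(void)
{
	/* HT20, 800 ns GI PHY rates for MCS 0..7, in kbps */
	int ht20_kbps[8] = { 6500, 13000, 19500, 26000,
			     39000, 52000, 58500, 65000 };
	/* corresponding MCS_DEFAULT entries from the table above */
	int table[8]	 = { 3216, 6434, 9650, 12868,
			     19304, 25740, 28956, 32180 };
	int i;

	for (i = 0; i < 8; i++)
		printf("MCS%d: 4 ms budget ~%d bytes, table uses %d\n",
		       i, ht20_kbps[i] / 2, table[i]);
	return 0;
}
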
+
+
 /*********************/
 /* Aggregation logic */
 /*********************/
@@ -107,7 +130,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
 	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
-	ASSERT(tid->paused > 0);
+	BUG_ON(tid->paused <= 0);
 	spin_lock_bh(&txq->axq_lock);
 
 	tid->paused--;
@@ -131,7 +154,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
 
-	ASSERT(tid->paused > 0);
+	BUG_ON(tid->paused <= 0);
 	spin_lock_bh(&txq->axq_lock);
 
 	tid->paused--;
@@ -143,7 +166,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 	while (!list_empty(&tid->buf_q)) {
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
-		ASSERT(!bf_isretried(bf));
+		BUG_ON(bf_isretried(bf));
 		list_move_tail(&bf->list, &bf_head);
 		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
 	}
@@ -178,7 +201,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
 	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
 
-	ASSERT(tid->tx_buf[cindex] == NULL);
+	BUG_ON(tid->tx_buf[cindex] != NULL);
 	tid->tx_buf[cindex] = bf;
 
 	if (index >= ((tid->baw_tail - tid->baw_head) &
@@ -251,6 +274,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 
 	ATH_TXBUF_RESET(tbf);
 
+	tbf->aphy = bf->aphy;
 	tbf->bf_mpdu = bf->bf_mpdu;
 	tbf->bf_buf_addr = bf->bf_buf_addr;
 	*(tbf->bf_desc) = *(bf->bf_desc);
@@ -267,7 +291,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	struct ath_node *an = NULL;
 	struct sk_buff *skb;
 	struct ieee80211_sta *sta;
+	struct ieee80211_hw *hw;
 	struct ieee80211_hdr *hdr;
+	struct ieee80211_tx_info *tx_info;
 	struct ath_atx_tid *tid = NULL;
 	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
 	struct ath_desc *ds = bf_last->bf_desc;
@@ -280,9 +306,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	skb = bf->bf_mpdu;
 	hdr = (struct ieee80211_hdr *)skb->data;
 
+	tx_info = IEEE80211_SKB_CB(skb);
+	hw = bf->aphy->hw;
+
 	rcu_read_lock();
 
-	sta = ieee80211_find_sta(sc->hw, hdr->addr1);
+	/* XXX: use ieee80211_find_sta! */
+	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
 	if (!sta) {
 		rcu_read_unlock();
 		return;
@@ -358,7 +388,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 			else
 				INIT_LIST_HEAD(&bf_head);
 		} else {
-			ASSERT(!list_empty(bf_q));
+			BUG_ON(list_empty(bf_q));
 			list_move_tail(&bf->list, &bf_head);
 		}
 
@@ -452,11 +482,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 			   struct ath_atx_tid *tid)
 {
-	const struct ath_rate_table *rate_table = sc->cur_rate_table;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *tx_info;
 	struct ieee80211_tx_rate *rates;
-	struct ath_tx_info_priv *tx_info_priv;
 	u32 max_4ms_framelen, frmlen;
 	u16 aggr_limit, legacy = 0;
 	int i;
@@ -464,7 +492,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 	skb = bf->bf_mpdu;
 	tx_info = IEEE80211_SKB_CB(skb);
 	rates = tx_info->control.rates;
-	tx_info_priv = (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
 
 	/*
 	 * Find the lowest frame length among the rate series that will have a
@@ -475,12 +502,20 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 
 	for (i = 0; i < 4; i++) {
 		if (rates[i].count) {
-			if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
+			int modeidx;
+			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
 				legacy = 1;
 				break;
 			}
 
-			frmlen = rate_table->info[rates[i].idx].max_4ms_framelen;
+			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+				modeidx = MCS_HT40_SGI;
+			else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+				modeidx = MCS_HT40;
+			else
+				modeidx = MCS_DEFAULT;
+
+			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
 			max_4ms_framelen = min(max_4ms_framelen, frmlen);
 		}
 	}
@@ -518,12 +553,11 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 				  struct ath_buf *bf, u16 frmlen)
 {
-	const struct ath_rate_table *rt = sc->cur_rate_table;
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	u32 nsymbits, nsymbols;
 	u16 minlen;
-	u8 rc, flags, rix;
+	u8 flags, rix;
 	int width, half_gi, ndelim, mindelim;
 
 	/* Select standard number of delimiters based on frame length alone */
@@ -553,7 +587,6 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 
 	rix = tx_info->control.rates[0].idx;
 	flags = tx_info->control.rates[0].flags;
-	rc = rt->info[rix].ratecode;
 	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
 	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
 
@@ -565,7 +598,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	if (nsymbols == 0)
 		nsymbols = 1;
 
-	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
+	nsymbits = bits_per_symbol[rix][width];
 	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
 
 	if (frmlen < minlen) {
@@ -694,7 +727,6 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 		/* anchor last desc of aggregate */
 		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
 
-		txq->axq_aggr_depth++;
 		ath_tx_txqaddbuf(sc, txq, &bf_q);
 		TX_STAT_INC(txq->axq_qnum, a_aggr);
 
@@ -815,6 +847,7 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info qi;
 	int qnum;
 
@@ -854,9 +887,9 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		return NULL;
 	}
 	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"qnum %u out of range, max %u!\n",
-			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
+		ath_print(common, ATH_DBG_FATAL,
+			  "qnum %u out of range, max %u!\n",
+			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
 		ath9k_hw_releasetxqueue(ah, qnum);
 		return NULL;
 	}
@@ -869,8 +902,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		INIT_LIST_HEAD(&txq->axq_acq);
 		spin_lock_init(&txq->axq_lock);
 		txq->axq_depth = 0;
-		txq->axq_aggr_depth = 0;
-		txq->axq_linkbuf = NULL;
 		txq->axq_tx_inprogress = false;
 		sc->tx.txqsetup |= 1<<qnum;
 	}
@@ -884,9 +915,9 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 	switch (qtype) {
 	case ATH9K_TX_QUEUE_DATA:
 		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"HAL AC %u out of range, max %zu!\n",
-				haltype, ARRAY_SIZE(sc->tx.hwq_map));
+			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+				  "HAL AC %u out of range, max %zu!\n",
+				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
 			return -1;
 		}
 		qnum = sc->tx.hwq_map[haltype];
@@ -906,18 +937,19 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
 {
 	struct ath_txq *txq = NULL;
+	u16 skb_queue = skb_get_queue_mapping(skb);
 	int qnum;
 
-	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+	qnum = ath_get_hal_qnum(skb_queue, sc);
 	txq = &sc->tx.txq[qnum];
 
 	spin_lock_bh(&txq->axq_lock);
 
 	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
-		DPRINTF(sc, ATH_DBG_XMIT,
-			"TX queue: %d is full, depth: %d\n",
-			qnum, txq->axq_depth);
-		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
+			  "TX queue: %d is full, depth: %d\n",
+			  qnum, txq->axq_depth);
+		ath_mac80211_stop_queue(sc, skb_queue);
 		txq->stopped = 1;
 		spin_unlock_bh(&txq->axq_lock);
 		return NULL;
@@ -945,7 +977,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 		return 0;
 	}
 
-	ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
+	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
 
 	ath9k_hw_get_txq_props(ah, qnum, &qi);
 	qi.tqi_aifs = qinfo->tqi_aifs;
@@ -955,8 +987,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 	qi.tqi_readyTime = qinfo->tqi_readyTime;
 
 	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Unable to update hardware queue %u!\n", qnum);
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "Unable to update hardware queue %u!\n", qnum);
 		error = -EIO;
 	} else {
 		ath9k_hw_resettxqueue(ah, qnum);
@@ -1004,7 +1036,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 
 		if (list_empty(&txq->axq_q)) {
 			txq->axq_link = NULL;
-			txq->axq_linkbuf = NULL;
 			spin_unlock_bh(&txq->axq_lock);
 			break;
 		}
@@ -1055,6 +1086,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_txq *txq;
 	int i, npend = 0;
 
@@ -1076,14 +1108,15 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 	if (npend) {
 		int r;
 
-		DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
+		ath_print(common, ATH_DBG_XMIT,
+			  "Unable to stop TxDMA. Reset HAL!\n");
 
 		spin_lock_bh(&sc->sc_resetlock);
 		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
 		if (r)
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"Unable to reset hardware; reset status %d\n",
-				r);
+			ath_print(common, ATH_DBG_FATAL,
+				  "Unable to reset hardware; reset status %d\n",
+				  r);
 		spin_unlock_bh(&sc->sc_resetlock);
 	}
 
@@ -1147,8 +1180,8 @@ int ath_tx_setup(struct ath_softc *sc, int haltype)
 	struct ath_txq *txq;
 
 	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"HAL AC %u out of range, max %zu!\n",
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "HAL AC %u out of range, max %zu!\n",
 			 haltype, ARRAY_SIZE(sc->tx.hwq_map));
 		return 0;
 	}
@@ -1172,6 +1205,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 			     struct list_head *head)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_buf *bf;
 
 	/*
@@ -1186,21 +1220,20 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 
 	list_splice_tail_init(head, &txq->axq_q);
 	txq->axq_depth++;
-	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
 
-	DPRINTF(sc, ATH_DBG_QUEUE,
-		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+	ath_print(common, ATH_DBG_QUEUE,
+		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
 
 	if (txq->axq_link == NULL) {
 		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-		DPRINTF(sc, ATH_DBG_XMIT,
-			"TXDP[%u] = %llx (%p)\n",
-			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+		ath_print(common, ATH_DBG_XMIT,
+			  "TXDP[%u] = %llx (%p)\n",
+			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
 	} else {
 		*txq->axq_link = bf->bf_daddr;
-		DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
-			txq->axq_qnum, txq->axq_link,
-			ito64(bf->bf_daddr), bf->bf_desc);
+		ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
+			  txq->axq_qnum, txq->axq_link,
+			  ito64(bf->bf_daddr), bf->bf_desc);
 	}
 	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
 	ath9k_hw_txstart(ah, txq->axq_qnum);
@@ -1420,22 +1453,14 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
 			    int width, int half_gi, bool shortPreamble)
 {
-	const struct ath_rate_table *rate_table = sc->cur_rate_table;
 	u32 nbits, nsymbits, duration, nsymbols;
-	u8 rc;
 	int streams, pktlen;
 
 	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
-	rc = rate_table->info[rix].ratecode;
-
-	/* for legacy rates, use old function to compute packet duration */
-	if (!IS_HT_RATE(rc))
-		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
-					      rix, shortPreamble);
 
 	/* find number of symbols: PLCP + data */
 	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
-	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
+	nsymbits = bits_per_symbol[rix][width];
 	nsymbols = (nbits + nsymbits - 1) / nsymbits;
 
 	if (!half_gi)
@@ -1444,7 +1469,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
 		duration = SYMBOL_TIME_HALFGI(nsymbols);
 
 	/* addup duration for legacy/ht training and signal fields */
-	streams = HT_RC_2_STREAMS(rc);
+	streams = HT_RC_2_STREAMS(rix);
 	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
 
 	return duration;
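
With legacy rates now routed through ath9k_hw_computetxtime() in ath_buf_set_rate(), this function only handles HT frames: total bits are 8 x pktlen plus the PLCP service/tail bits, symbols are that divided by the data bits per OFDM symbol for the MCS and width (rounded up), and the duration is 4 us per symbol (3.6 us with short GI) plus the legacy and HT preamble fields. A worked standalone version for single-stream HT20; the bits-per-symbol and preamble constants are the usual 802.11n values, filled in here for illustration rather than taken from the driver's tables:

#include <stdio.h>

#define OFDM_PLCP_BITS		22	/* 16 service + 6 tail bits */
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF_PER_STREAM	4	/* one HT-LTF per spatial stream */

/* data bits per symbol, HT20 single stream, MCS 0..7 */
static const int bits_per_symbol_ht20[8] = {
	26, 52, 78, 104, 156, 208, 234, 260
};

static unsigned int ht_pkt_duration_us(int pktlen, int mcs, int half_gi)
{
	int nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	int nsymbits = bits_per_symbol_ht20[mcs];
	int nsymbols = (nbits + nsymbits - 1) / nsymbits;	/* round up */
	unsigned int dur;

	/* 4 us per symbol, or 3.6 us with the short guard interval */
	dur = half_gi ? (nsymbols * 18 + 4) / 5 : nsymbols * 4;
	dur += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF_PER_STREAM;
	return dur;
}

int main(void)
{
	/* 1500-byte frame at MCS 7, long GI: 47 symbols -> 224 us */
	printf("%u us\n", ht_pkt_duration_us(1500, 7, 0));
	return 0;
}
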
@@ -1452,11 +1477,12 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
 
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 {
-	const struct ath_rate_table *rt = sc->cur_rate_table;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath9k_11n_rate_series series[4];
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *tx_info;
 	struct ieee80211_tx_rate *rates;
+	const struct ieee80211_rate *rate;
 	struct ieee80211_hdr *hdr;
 	int i, flags = 0;
 	u8 rix = 0, ctsrate = 0;
@@ -1475,11 +1501,10 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	 * checking the BSS's global flag.
 	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
 	 */
+	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
+	ctsrate = rate->hw_value;
 	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
-		ctsrate = rt->info[tx_info->control.rts_cts_rate_idx].ratecode |
-			rt->info[tx_info->control.rts_cts_rate_idx].short_preamble;
-	else
-		ctsrate = rt->info[tx_info->control.rts_cts_rate_idx].ratecode;
+		ctsrate |= rate->hw_value_short;
 
 	/*
 	 * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
@@ -1502,18 +1527,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		flags &= ~(ATH9K_TXDESC_RTSENA);
 
 	for (i = 0; i < 4; i++) {
+		bool is_40, is_sgi, is_sp;
+		int phy;
+
 		if (!rates[i].count || (rates[i].idx < 0))
 			continue;
 
 		rix = rates[i].idx;
 		series[i].Tries = rates[i].count;
-		series[i].ChSel = sc->tx_chainmask;
-
-		if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-			series[i].Rate = rt->info[rix].ratecode |
-				rt->info[rix].short_preamble;
-		else
-			series[i].Rate = rt->info[rix].ratecode;
+		series[i].ChSel = common->tx_chainmask;
 
 		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
 			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
@@ -1522,10 +1544,36 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
 			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
 
-		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
-			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
-			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
-			 (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE));
+		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
+		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
+		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+
+		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
+			/* MCS rates */
+			series[i].Rate = rix | 0x80;
+			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
+				 is_40, is_sgi, is_sp);
+			continue;
+		}
+
+		/* legacy rates */
+		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+		    !(rate->flags & IEEE80211_RATE_ERP_G))
+			phy = WLAN_RC_PHY_CCK;
+		else
+			phy = WLAN_RC_PHY_OFDM;
+
+		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+		series[i].Rate = rate->hw_value;
+		if (rate->hw_value_short) {
+			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+				series[i].Rate |= rate->hw_value_short;
+		} else {
+			is_sp = false;
+		}
+
+		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
+			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
 	}
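
In the series setup above, MCS entries are encoded as the MCS index with bit 7 set (rix | 0x80), the same convention the removed rx path used to recognise HT rates, while legacy entries take the band's hw_value, optionally ORed with the short-preamble code. A tiny sketch of encoding and decoding under that convention:

#include <stdio.h>

#define HT_RATECODE_FLAG 0x80

int main(void)
{
	unsigned char ratecode = 7 | HT_RATECODE_FLAG;	/* MCS 7 */

	if (ratecode & HT_RATECODE_FLAG)
		printf("HT ratecode 0x%02x -> MCS %d\n",
		       ratecode, ratecode & 0x7f);
	else
		printf("legacy ratecode 0x%02x\n", ratecode);
	return 0;
}
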
 
 	/* set dur_update_en for l-sig computation except for PS-Poll frames */
@@ -1546,24 +1594,36 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 	struct ath_softc *sc = aphy->sc;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ath_tx_info_priv *tx_info_priv;
 	int hdrlen;
 	__le16 fc;
+	int padpos, padsize;
 
-	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
-	if (unlikely(!tx_info_priv))
-		return -ENOMEM;
-	tx_info->rate_driver_data[0] = tx_info_priv;
-	tx_info_priv->aphy = aphy;
-	tx_info_priv->frame_type = txctl->frame_type;
+	tx_info->pad[0] = 0;
+	switch (txctl->frame_type) {
+	case ATH9K_NOT_INTERNAL:
+		break;
+	case ATH9K_INT_PAUSE:
+		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
+		/* fall through */
+	case ATH9K_INT_UNPAUSE:
+		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
+		break;
+	}
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 	fc = hdr->frame_control;
 
 	ATH_TXBUF_RESET(bf);
 
-	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
+	bf->aphy = aphy;
+	bf->bf_frmlen = skb->len + FCS_LEN;
+	/* Remove the padding size from bf_frmlen, if any */
+	padpos = ath9k_cmn_padpos(hdr->frame_control);
+	padsize = padpos & 3;
+	if (padsize && skb->len > padpos + padsize) {
+		bf->bf_frmlen -= padsize;
+	}
 
-	if (conf_is_ht(&sc->hw->conf) && !is_pae(skb))
+	if (conf_is_ht(&hw->conf) && !is_pae(skb))
 		bf->bf_state.bf_type |= BUF_HT;
 
 	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1585,13 +1645,20 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 					   skb->len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
 		bf->bf_mpdu = NULL;
-		kfree(tx_info_priv);
-		tx_info->rate_driver_data[0] = NULL;
-		DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error() on TX\n");
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "dma_mapping_error() on TX\n");
 		return -ENOMEM;
 	}
 
 	bf->bf_buf_addr = bf->bf_dmacontext;
+
+	/* tag if this is a nullfunc frame to enable PS when AP acks it */
+	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
+		bf->bf_isnullfunc = true;
+		sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+	} else
+		bf->bf_isnullfunc = false;
+
 	return 0;
 }
 
@@ -1669,12 +1736,13 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_buf *bf;
 	int r;
 
 	bf = ath_tx_get_buffer(sc);
 	if (!bf) {
-		DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
+		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
 		return -1;
 	}
 
@@ -1682,7 +1750,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 	if (unlikely(r)) {
 		struct ath_txq *txq = txctl->txq;
 
-		DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
+		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
 
 		/* upon ath_tx_processq() this TX queue will be resumed, we
 		 * guarantee this will happen by knowing beforehand that
@@ -1690,8 +1758,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		 * on the queue */
 		spin_lock_bh(&txq->axq_lock);
 		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
-			ieee80211_stop_queue(sc->hw,
-				skb_get_queue_mapping(skb));
+			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
 			txq->stopped = 1;
 		}
 		spin_unlock_bh(&txq->axq_lock);
@@ -1712,6 +1779,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	int hdrlen, padsize;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ath_tx_control txctl;
@@ -1736,7 +1804,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (hdrlen & 3) {
 		padsize = hdrlen % 4;
 		if (skb_headroom(skb) < padsize) {
-			DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
+			ath_print(common, ATH_DBG_XMIT,
+				  "TX CABQ padding failed\n");
 			dev_kfree_skb_any(skb);
 			return;
 		}
@@ -1746,10 +1815,11 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	txctl.txq = sc->beacon.cabq;
 
-	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
+	ath_print(common, ATH_DBG_XMIT,
+		  "transmitting CABQ packet, skb: %p\n", skb);
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
-		DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
+		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
 		goto exit;
 	}
 
@@ -1763,26 +1833,17 @@ exit:
 /*****************/
 
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-			    int tx_flags)
+			    struct ath_wiphy *aphy, int tx_flags)
 {
 	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	int hdrlen, padsize;
-	int frame_type = ATH9K_NOT_INTERNAL;
 
-	DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
+	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
 
-	if (tx_info_priv) {
-		hw = tx_info_priv->aphy->hw;
-		frame_type = tx_info_priv->frame_type;
-	}
-
-	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
-	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
-		kfree(tx_info_priv);
-		tx_info->rate_driver_data[0] = NULL;
-	}
+	if (aphy)
+		hw = aphy->hw;
 
 	if (tx_flags & ATH_TX_BAR)
 		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -1805,18 +1866,19 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
 	if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
 		sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
-		DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having "
-			"received TX status (0x%x)\n",
+		ath_print(common, ATH_DBG_PS,
+			  "Going back to sleep after having "
+			  "received TX status (0x%x)\n",
 			sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
 					SC_OP_WAIT_FOR_CAB |
 					SC_OP_WAIT_FOR_PSPOLL_DATA |
 					SC_OP_WAIT_FOR_TX_ACK));
 	}
 
-	if (frame_type == ATH9K_NOT_INTERNAL)
-		ieee80211_tx_status(hw, skb);
-	else
+	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
 		ath9k_tx_status(hw, skb);
+	else
+		ieee80211_tx_status(hw, skb);
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1839,7 +1901,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 	}
 
 	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
-	ath_tx_complete(sc, skb, tx_flags);
+	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
 	ath_debug_stat_tx(sc, txq, bf);
 
 	/*
@@ -1887,8 +1949,7 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
-	struct ieee80211_hw *hw = tx_info_priv->aphy->hw;
+	struct ieee80211_hw *hw = bf->aphy->hw;
 	u8 i, tx_rateindex;
 
 	if (txok)
@@ -1897,22 +1958,29 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
 	tx_rateindex = ds->ds_txstat.ts_rateindex;
 	WARN_ON(tx_rateindex >= hw->max_rates);
 
-	tx_info_priv->update_rc = update_rc;
+	if (update_rc)
+		tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
 	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
 		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 
 	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
 	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
 		if (ieee80211_is_data(hdr->frame_control)) {
-			memcpy(&tx_info_priv->tx, &ds->ds_txstat,
-			       sizeof(tx_info_priv->tx));
-			tx_info_priv->n_frames = bf->bf_nframes;
-			tx_info_priv->n_bad_frames = nbad;
+			if (ds->ds_txstat.ts_flags &
+			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
+				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
+			if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
+			    (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
+				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
+			tx_info->status.ampdu_len = bf->bf_nframes;
+			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
 		}
 	}
 
-	for (i = tx_rateindex + 1; i < hw->max_rates; i++)
+	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
 		tx_info->status.rates[i].count = 0;
+		tx_info->status.rates[i].idx = -1;
+	}
 
 	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
 }
@@ -1926,7 +1994,7 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
 	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
 		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
 		if (qnum != -1) {
-			ieee80211_wake_queue(sc->hw, qnum);
+			ath_mac80211_start_queue(sc, qnum);
 			txq->stopped = 0;
 		}
 	}
@@ -1936,21 +2004,21 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_buf *bf, *lastbf, *bf_held = NULL;
 	struct list_head bf_head;
 	struct ath_desc *ds;
 	int txok;
 	int status;
 
-	DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
-		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
-		txq->axq_link);
+	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
+		  txq->axq_link);
 
 	for (;;) {
 		spin_lock_bh(&txq->axq_lock);
 		if (list_empty(&txq->axq_q)) {
 			txq->axq_link = NULL;
-			txq->axq_linkbuf = NULL;
 			spin_unlock_bh(&txq->axq_lock);
 			break;
 		}
@@ -1984,10 +2052,19 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 			spin_unlock_bh(&txq->axq_lock);
 			break;
 		}
-		if (bf->bf_desc == txq->axq_lastdsWithCTS)
-			txq->axq_lastdsWithCTS = NULL;
-		if (ds == txq->axq_gatingds)
-			txq->axq_gatingds = NULL;
+
+		/*
+		 * We now know the nullfunc frame has been ACKed so we
+		 * can disable RX.
+		 */
+		if (bf->bf_isnullfunc &&
+		    (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
+			if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
+				sc->ps_enabled = true;
+				ath9k_hw_setrxabort(sc->sc_ah, 1);
+			} else
+				sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+		}
 
 		/*
 		 * Remove ath_buf's of the same transmit unit from txq,
@@ -2001,9 +2078,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 				&txq->axq_q, lastbf->list.prev);
 
 		txq->axq_depth--;
-		if (bf_isaggr(bf))
-			txq->axq_aggr_depth--;
-
 		txok = (ds->ds_txstat.ts_status == 0);
 		txq->axq_tx_inprogress = false;
 		spin_unlock_bh(&txq->axq_lock);
@@ -2064,8 +2138,11 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
 		}
 
 	if (needreset) {
-		DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n");
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
+			  "tx hung, resetting the chip\n");
+		ath9k_ps_wakeup(sc);
 		ath_reset(sc, false);
+		ath9k_ps_restore(sc);
 	}
 
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2093,6 +2170,7 @@ void ath_tx_tasklet(struct ath_softc *sc)
 
 int ath_tx_init(struct ath_softc *sc, int nbufs)
 {
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	int error = 0;
 
 	spin_lock_init(&sc->tx.txbuflock);
@@ -2100,16 +2178,16 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
 				  "tx", nbufs, 1);
 	if (error != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Failed to allocate tx descriptors: %d\n", error);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Failed to allocate tx descriptors: %d\n", error);
 		goto err;
 	}
 
 	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
 				  "beacon", ATH_BCBUF, 1);
 	if (error != 0) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"Failed to allocate beacon descriptors: %d\n", error);
+		ath_print(common, ATH_DBG_FATAL,
+			  "Failed to allocate beacon descriptors: %d\n", error);
 		goto err;
 	}
 
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
new file mode 100644
index 000000000000..53e77bd131b9
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath.h"
+#include "debug.h"
+
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+{
+	va_list args;
+
+	if (likely(!(common->debug_mask & dbg_mask)))
+		return;
+
+	va_start(args, fmt);
+	printk(KERN_DEBUG "ath: ");
+	vprintk(fmt, args);
+	va_end(args);
+}
+EXPORT_SYMBOL(ath_print);
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
new file mode 100644
index 000000000000..d6b685a06c5e
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH_DEBUG_H
+#define ATH_DEBUG_H
+
+#include "ath.h"
+
+/**
+ * enum ath_debug_level - atheros wireless debug level
+ *
+ * @ATH_DBG_RESET: reset processing
+ * @ATH_DBG_QUEUE: hardware queue management
+ * @ATH_DBG_EEPROM: eeprom processing
+ * @ATH_DBG_CALIBRATE: periodic calibration
+ * @ATH_DBG_INTERRUPT: interrupt processing
+ * @ATH_DBG_REGULATORY: regulatory processing
+ * @ATH_DBG_ANI: adaptive noise immunity processing
+ * @ATH_DBG_XMIT: basic xmit operation
+ * @ATH_DBG_BEACON: beacon handling
+ * @ATH_DBG_CONFIG: configuration of the hardware
+ * @ATH_DBG_FATAL: fatal errors; this is the default, see ATH_DBG_DEFAULT
+ * @ATH_DBG_PS: power save processing
+ * @ATH_DBG_HWTIMER: hardware timer handling
+ * @ATH_DBG_BTCOEX: bluetooth coexistence
+ * @ATH_DBG_ANY: enable all debugging
+ *
+ * The debug level is used to control the amount and type of debugging output
+ * we want to see. Each driver has its own method for enabling debugging and
+ * modifying debug level states -- but this is typically done through a
+ * module parameter 'debug' along with a respective 'debug' debugfs file
+ * entry.
+ */
+enum ATH_DEBUG {
+	ATH_DBG_RESET		= 0x00000001,
+	ATH_DBG_QUEUE		= 0x00000002,
+	ATH_DBG_EEPROM		= 0x00000004,
+	ATH_DBG_CALIBRATE	= 0x00000008,
+	ATH_DBG_INTERRUPT	= 0x00000010,
+	ATH_DBG_REGULATORY	= 0x00000020,
+	ATH_DBG_ANI		= 0x00000040,
+	ATH_DBG_XMIT		= 0x00000080,
+	ATH_DBG_BEACON		= 0x00000100,
+	ATH_DBG_CONFIG		= 0x00000200,
+	ATH_DBG_FATAL		= 0x00000400,
+	ATH_DBG_PS		= 0x00000800,
+	ATH_DBG_HWTIMER		= 0x00001000,
+	ATH_DBG_BTCOEX		= 0x00002000,
+	ATH_DBG_ANY		= 0xffffffff
+};
+
+#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
+
+#ifdef CONFIG_ATH_DEBUG
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
+#else
+static inline void ath_print(struct ath_common *common,
+			     int dbg_mask,
+			     const char *fmt, ...)
+{
+}
+#endif /* CONFIG_ATH_DEBUG */
+
+#endif /* ATH_DEBUG_H */
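
A rough call-site sketch (assuming a struct ath_common pointer obtained via ath9k_hw_common() as in the ath9k hunks above; the wrapper function is hypothetical): the call compiles to nothing unless CONFIG_ATH_DEBUG is enabled, and the message is printed only when the corresponding bit is set in common->debug_mask, typically via the driver's 'debug' module parameter.

static void example_trace_queue_stop(struct ath_common *common, int qnum)
{
	/* Emitted only when ATH_DBG_XMIT is set in common->debug_mask. */
	ath_print(common, ATH_DBG_XMIT, "stopping hw queue %d\n", qnum);
}
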
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
new file mode 100644
index 000000000000..ecc9eb01f4fa
--- /dev/null
+++ b/drivers/net/wireless/ath/hw.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include "ath.h"
+#include "reg.h"
+
+#define REG_READ	common->ops->read
+#define REG_WRITE	common->ops->write
+
+/**
+ * ath_hw_setbssidmask - set the BSSID mask for the BSSIDs we listen to
+ *
+ * @common: the ath_common struct for the device.
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform the PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * NOTE: This is a simple filter and does *not* filter out all
+ * relevant frames. Some frames that are not for us might still get ACKed by
+ * the PCU simply because they match the mask.
+ *
+ * When handling multiple BSSes you can get the BSSID mask by computing the
+ * AND of ~(MAC XOR BSSID) over all BSSIDs we handle.
+ *
+ * When you do this you are essentially computing the common bits of all your
+ * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
+ * the MAC address to obtain the relevant bits and compare the result with
+ * (frame's BSSID & mask) to see if they match.
+ *
+ * Simple example: on your card you have two BSSes you have created with
+ * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
+ * There is another BSSID-03 but you are not part of it. For simplicity's sake,
+ * assuming only 4 bits for a mac address and for BSSIDs you can then have:
+ *
+ *                  \
+ * MAC:        0001 |
+ * BSSID-01:   0100 | --> Belongs to us
+ * BSSID-02:   1001 |
+ *                  /
+ * -------------------
+ * BSSID-03:   0110  | --> External
+ * -------------------
+ *
+ * Our bssid_mask would then be:
+ *
+ *             On loop iteration for BSSID-01:
+ *             ~(0001 ^ 0100)  -> ~(0101)
+ *                             ->   1010
+ *             bssid_mask      =    1010
+ *
+ *             On loop iteration for BSSID-02:
+ *             bssid_mask &= ~(0001   ^   1001)
+ *             bssid_mask =   (1010)  & ~(0001 ^ 1001)
+ *             bssid_mask =   (1010)  & ~(1001)
+ *             bssid_mask =   (1010)  &  (0110)
+ *             bssid_mask =   0010
+ *
+ * A bssid_mask of 0010 means "only pay attention to the second least
+ * significant bit". This is because it is the only bit common
+ * amongst the MAC and all BSSIDs we support. To find out what the real
+ * common bit is we can simply "&" the bssid_mask now with any BSSID we have
+ * or our MAC address (we assume the hardware uses the MAC address).
+ *
+ * Now, suppose there's an incoming frame for BSSID-03:
+ *
+ * IFRAME-01:  0110
+ *
+ * An easy eye inspection of this should already tell you that this frame
+ * will not pass our check. This is because the bssid_mask tells the
+ * hardware to look only at the second least significant bit; the
+ * common bit amongst the MAC and BSSIDs is 0, while this frame has the
+ * 2nd LSB set to 1, which does not match 0.
+ *
+ * So with IFRAME-01 we *assume* the hardware will do:
+ *
+ *     allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ *  --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
+ *  --> allow = (0010) == 0000 ? 1 : 0;
+ *  --> allow = 0
+ *
+ *  Let's now test a frame that should work:
+ *
+ * IFRAME-02:  0001 (we should allow)
+ *
+ *     allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ *  --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
+ *  --> allow = (0000) == (0000)
+ *  --> allow = 1
+ *
+ * Other examples:
+ *
+ * IFRAME-03:  0100 --> allowed
+ * IFRAME-04:  1001 --> allowed
+ * IFRAME-05:  1101 --> allowed, but it's not for us!
+ *
+ */
+void ath_hw_setbssidmask(struct ath_common *common)
+{
+	void *ah = common->ah;
+
+	REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL);
+	REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
+}
+EXPORT_SYMBOL(ath_hw_setbssidmask);
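
A minimal user-space sketch of the mask derivation described in the comment above (compute_bssid_mask() is a hypothetical helper, not driver code): start from all-ones and AND in ~(MAC ^ BSSID) for every BSSID we serve, byte by byte.

#include <stdio.h>

#define ETH_ALEN 6

static void compute_bssid_mask(const unsigned char mac[ETH_ALEN],
			       const unsigned char bssids[][ETH_ALEN],
			       int n_bssids, unsigned char mask[ETH_ALEN])
{
	int i, b;

	for (b = 0; b < ETH_ALEN; b++)
		mask[b] = 0xff;
	for (i = 0; i < n_bssids; i++)
		for (b = 0; b < ETH_ALEN; b++)
			mask[b] &= ~(mac[b] ^ bssids[i][b]);
}

int main(void)
{
	/* Same idea as the 4-bit walkthrough, with made-up 6-byte addresses. */
	const unsigned char mac[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x01 };
	const unsigned char bssids[2][ETH_ALEN] = {
		{ 0x00, 0x03, 0x7f, 0x10, 0x20, 0x04 },	/* BSSID-01 */
		{ 0x00, 0x03, 0x7f, 0x10, 0x20, 0x09 },	/* BSSID-02 */
	};
	unsigned char mask[ETH_ALEN];
	int b;

	compute_bssid_mask(mac, bssids, 2, mask);
	for (b = 0; b < ETH_ALEN; b++)
		printf("%02x%c", mask[b], b == ETH_ALEN - 1 ? '\n' : ':');
	return 0;	/* prints ff:ff:ff:ff:ff:f2 */
}

Only the bits common to the MAC and both BSSIDs survive in the last byte (0xf2), which is the 6-byte analogue of the 0010 result in the comment.
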
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
new file mode 100644
index 000000000000..dfe1fbec24f5
--- /dev/null
+++ b/drivers/net/wireless/ath/reg.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH_REGISTERS_H
+#define ATH_REGISTERS_H
+
+/*
+ * BSSID mask registers. See ath_hw_setbssidmask()
+ * for detailed documentation about these registers.
+ */
+#define AR_BSSMSKL		0x80e0
+#define AR_BSSMSKU		0x80e4
+
+#endif /* ATH_REGISTERS_H */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 077bcc142cde..039ac490465c 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -450,7 +450,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 	const struct ieee80211_regdomain *regd;
 
 	wiphy->reg_notifier = reg_notifier;
-	wiphy->strict_regulatory = true;
+	wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 
 	if (ath_is_world_regd(reg)) {
 		/*
@@ -458,8 +458,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 		 * saved on the wiphy orig_* parameters
 		 */
 		regd = ath_world_regdomain(reg);
-		wiphy->custom_regulatory = true;
-		wiphy->strict_regulatory = false;
+		wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 	} else {
 		/*
 		 * This gets applied in the case of the absence of CRDA,
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index c1dd857697a7..a1c39526161a 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -65,10 +65,13 @@ enum CountryCode {
 	CTRY_ALGERIA = 12,
 	CTRY_ARGENTINA = 32,
 	CTRY_ARMENIA = 51,
+	CTRY_ARUBA = 533,
 	CTRY_AUSTRALIA = 36,
 	CTRY_AUSTRIA = 40,
 	CTRY_AZERBAIJAN = 31,
 	CTRY_BAHRAIN = 48,
+	CTRY_BANGLADESH = 50,
+	CTRY_BARBADOS = 52,
 	CTRY_BELARUS = 112,
 	CTRY_BELGIUM = 56,
 	CTRY_BELIZE = 84,
@@ -77,6 +80,7 @@ enum CountryCode {
 	CTRY_BRAZIL = 76,
 	CTRY_BRUNEI_DARUSSALAM = 96,
 	CTRY_BULGARIA = 100,
+	CTRY_CAMBODIA = 116,
 	CTRY_CANADA = 124,
 	CTRY_CHILE = 152,
 	CTRY_CHINA = 156,
@@ -97,7 +101,11 @@ enum CountryCode {
 	CTRY_GEORGIA = 268,
 	CTRY_GERMANY = 276,
 	CTRY_GREECE = 300,
+	CTRY_GREENLAND = 304,
+	CTRY_GRENEDA = 308,
+	CTRY_GUAM = 316,
 	CTRY_GUATEMALA = 320,
+	CTRY_HAITI = 332,
 	CTRY_HONDURAS = 340,
 	CTRY_HONG_KONG = 344,
 	CTRY_HUNGARY = 348,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 9847af72208c..248c670fdfbe 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -288,13 +288,16 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_DEFAULT, FCC1_FCCA, "CO"},
 	{CTRY_ALBANIA, NULL1_WORLD, "AL"},
 	{CTRY_ALGERIA, NULL1_WORLD, "DZ"},
-	{CTRY_ARGENTINA, APL3_WORLD, "AR"},
+	{CTRY_ARGENTINA, FCC3_WORLD, "AR"},
 	{CTRY_ARMENIA, ETSI4_WORLD, "AM"},
+	{CTRY_ARUBA, ETSI1_WORLD, "AW"},
 	{CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
 	{CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
 	{CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
 	{CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
 	{CTRY_BAHRAIN, APL6_WORLD, "BH"},
+	{CTRY_BANGLADESH, NULL1_WORLD, "BD"},
+	{CTRY_BARBADOS, FCC2_WORLD, "BB"},
 	{CTRY_BELARUS, ETSI1_WORLD, "BY"},
 	{CTRY_BELGIUM, ETSI1_WORLD, "BE"},
 	{CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
@@ -304,13 +307,14 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_BRAZIL, FCC3_WORLD, "BR"},
 	{CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
 	{CTRY_BULGARIA, ETSI6_WORLD, "BG"},
-	{CTRY_CANADA, FCC2_FCCA, "CA"},
+	{CTRY_CAMBODIA, ETSI1_WORLD, "KH"},
+	{CTRY_CANADA, FCC3_FCCA, "CA"},
 	{CTRY_CANADA2, FCC6_FCCA, "CA"},
 	{CTRY_CHILE, APL6_WORLD, "CL"},
 	{CTRY_CHINA, APL1_WORLD, "CN"},
 	{CTRY_COLOMBIA, FCC1_FCCA, "CO"},
 	{CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
-	{CTRY_CROATIA, ETSI3_WORLD, "HR"},
+	{CTRY_CROATIA, ETSI1_WORLD, "HR"},
 	{CTRY_CYPRUS, ETSI1_WORLD, "CY"},
 	{CTRY_CZECH, ETSI3_WORLD, "CZ"},
 	{CTRY_DENMARK, ETSI1_WORLD, "DK"},
@@ -324,18 +328,22 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_GEORGIA, ETSI4_WORLD, "GE"},
 	{CTRY_GERMANY, ETSI1_WORLD, "DE"},
 	{CTRY_GREECE, ETSI1_WORLD, "GR"},
+	{CTRY_GREENLAND, ETSI1_WORLD, "GL"},
+	{CTRY_GRENEDA, FCC3_FCCA, "GD"},
+	{CTRY_GUAM, FCC1_FCCA, "GU"},
 	{CTRY_GUATEMALA, FCC1_FCCA, "GT"},
+	{CTRY_HAITI, ETSI1_WORLD, "HT"},
 	{CTRY_HONDURAS, NULL1_WORLD, "HN"},
-	{CTRY_HONG_KONG, FCC2_WORLD, "HK"},
+	{CTRY_HONG_KONG, FCC3_WORLD, "HK"},
 	{CTRY_HUNGARY, ETSI1_WORLD, "HU"},
 	{CTRY_ICELAND, ETSI1_WORLD, "IS"},
 	{CTRY_INDIA, APL6_WORLD, "IN"},
-	{CTRY_INDONESIA, APL1_WORLD, "ID"},
+	{CTRY_INDONESIA, NULL1_WORLD, "ID"},
 	{CTRY_IRAN, APL1_WORLD, "IR"},
 	{CTRY_IRELAND, ETSI1_WORLD, "IE"},
 	{CTRY_ISRAEL, NULL1_WORLD, "IL"},
 	{CTRY_ITALY, ETSI1_WORLD, "IT"},
-	{CTRY_JAMAICA, ETSI1_WORLD, "JM"},
+	{CTRY_JAMAICA, FCC3_WORLD, "JM"},
 
 	{CTRY_JAPAN, MKK1_MKKA, "JP"},
 	{CTRY_JAPAN1, MKK1_MKKB, "JP"},
@@ -402,7 +410,7 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_KOREA_ROC, APL9_WORLD, "KR"},
 	{CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
 	{CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
-	{CTRY_KUWAIT, NULL1_WORLD, "KW"},
+	{CTRY_KUWAIT, ETSI3_WORLD, "KW"},
 	{CTRY_LATVIA, ETSI1_WORLD, "LV"},
 	{CTRY_LEBANON, NULL1_WORLD, "LB"},
 	{CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
@@ -414,13 +422,13 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_MALTA, ETSI1_WORLD, "MT"},
 	{CTRY_MEXICO, FCC1_FCCA, "MX"},
 	{CTRY_MONACO, ETSI4_WORLD, "MC"},
-	{CTRY_MOROCCO, NULL1_WORLD, "MA"},
+	{CTRY_MOROCCO, APL4_WORLD, "MA"},
 	{CTRY_NEPAL, APL1_WORLD, "NP"},
 	{CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
 	{CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
 	{CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
 	{CTRY_NORWAY, ETSI1_WORLD, "NO"},
-	{CTRY_OMAN, APL6_WORLD, "OM"},
+	{CTRY_OMAN, FCC3_WORLD, "OM"},
 	{CTRY_PAKISTAN, NULL1_WORLD, "PK"},
 	{CTRY_PANAMA, FCC1_FCCA, "PA"},
 	{CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
@@ -429,7 +437,7 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_POLAND, ETSI1_WORLD, "PL"},
 	{CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
 	{CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
-	{CTRY_QATAR, NULL1_WORLD, "QA"},
+	{CTRY_QATAR, APL1_WORLD, "QA"},
 	{CTRY_ROMANIA, NULL1_WORLD, "RO"},
 	{CTRY_RUSSIA, NULL1_WORLD, "RU"},
 	{CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
@@ -445,7 +453,7 @@ static struct country_code_to_enum_rd allCountries[] = {
 	{CTRY_SYRIA, NULL1_WORLD, "SY"},
 	{CTRY_TAIWAN, APL3_FCCA, "TW"},
 	{CTRY_THAILAND, FCC3_WORLD, "TH"},
-	{CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT"},
+	{CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
 	{CTRY_TUNISIA, ETSI3_WORLD, "TN"},
 	{CTRY_TURKEY, ETSI3_WORLD, "TR"},
 	{CTRY_UKRAINE, NULL1_WORLD, "UA"},
@@ -456,7 +464,7 @@ static struct country_code_to_enum_rd allCountries[] = {
 	 * would need to assign new special alpha2 to CRDA db as with the world
 	 * regdomain and use another alpha2 */
 	{CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
-	{CTRY_URUGUAY, APL2_WORLD, "UY"},
+	{CTRY_URUGUAY, FCC3_WORLD, "UY"},
 	{CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
 	{CTRY_VENEZUELA, APL2_ETSIC, "VE"},
 	{CTRY_VIET_NAM, NULL1_WORLD, "VN"},
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index cce188837d10..3edbbcf0f548 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -99,6 +99,22 @@ static struct {
 	{ ATMEL_FW_TYPE_506,		"atmel_at76c506",	"bin" },
 	{ ATMEL_FW_TYPE_NONE,		NULL,			NULL }
 };
+MODULE_FIRMWARE("atmel_at76c502-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c502.bin");
+MODULE_FIRMWARE("atmel_at76c502d-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c502d.bin");
+MODULE_FIRMWARE("atmel_at76c502e-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c502e.bin");
+MODULE_FIRMWARE("atmel_at76c502_3com-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c502_3com.bin");
+MODULE_FIRMWARE("atmel_at76c504-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c504.bin");
+MODULE_FIRMWARE("atmel_at76c504_2958-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c504_2958.bin");
+MODULE_FIRMWARE("atmel_at76c504a_2958-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c504a_2958.bin");
+MODULE_FIRMWARE("atmel_at76c506-wpa.bin");
+MODULE_FIRMWARE("atmel_at76c506.bin");
 
 #define MAX_SSID_LENGTH 32
 #define MGMT_JIFFIES (256 * HZ / 100)
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 54ea61c15d8b..64c12e1bced3 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,6 +1,6 @@
 config B43
 	tristate "Broadcom 43xx wireless support (mac80211 stack)"
-	depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA
+	depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
 	select SSB
 	select FW_LOADER
 	---help---
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 660716214d49..fe3bf9491997 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -26,8 +26,6 @@
 # define B43_DEBUG	0
 #endif
 
-#define B43_RX_MAX_SSI			60
-
 /* MMIO offsets */
 #define B43_MMIO_DMA0_REASON		0x20
 #define B43_MMIO_DMA0_IRQ_MASK		0x24
@@ -749,12 +747,6 @@ struct b43_wldev {
 #endif
 };
 
-/*
- * Include goes here to avoid a dependency problem.
- * A better fix would be to integrate xmit.h into b43.h.
- */
-#include "xmit.h"
-
 /* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
 struct b43_wl {
 	/* Pointer to the active wireless device on this chip */
@@ -830,13 +822,9 @@ struct b43_wl {
 	struct b43_leds leds;
 
 #ifdef CONFIG_B43_PIO
-	/*
-	 * RX/TX header/tail buffers used by the frame transmit functions.
-	 */
-	struct b43_rxhdr_fw4 rxhdr;
-	struct b43_txhdr txhdr;
-	u8 rx_tail[4];
-	u8 tx_tail[4];
+	/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
+	u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
+	u8 pio_tailspace[4] __attribute__((__aligned__(8)));
 #endif /* CONFIG_B43_PIO */
 };
 
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index de4e804bedf0..027be275e035 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,44 +383,160 @@ static inline
 	}
 }
 
+/* Check if a DMA region fits the device constraints.
+ * Returns true, if the region is OK for usage with this device. */
+static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
+				      dma_addr_t addr, size_t size)
+{
+	switch (ring->type) {
+	case B43_DMA_30BIT:
+		if ((u64)addr + size > (1ULL << 30))
+			return 0;
+		break;
+	case B43_DMA_32BIT:
+		if ((u64)addr + size > (1ULL << 32))
+			return 0;
+		break;
+	case B43_DMA_64BIT:
+		/* Currently we can't have addresses beyond
+		 * 64bit in the kernel. */
+		break;
+	}
+	return 1;
+}
+
+#define is_4k_aligned(addr)	(((u64)(addr) & 0x0FFFull) == 0)
+#define is_8k_aligned(addr)	(((u64)(addr) & 0x1FFFull) == 0)
+
+static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
+				       dma_addr_t dmaaddr, size_t size)
+{
+	ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
+	free_pages((unsigned long)base, get_order(size));
+}
+
+static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
+					dma_addr_t *dmaaddr, size_t size,
+					gfp_t gfp_flags)
+{
+	void *base;
+
+	base = (void *)__get_free_pages(gfp_flags, get_order(size));
+	if (!base)
+		return NULL;
+	memset(base, 0, size);
+	*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
+				      DMA_TO_DEVICE);
+	if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
+		free_pages((unsigned long)base, get_order(size));
+		return NULL;
+	}
+
+	return base;
+}
+
+static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
+				      dma_addr_t *dmaaddr, size_t size)
+{
+	void *base;
+
+	base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+					 GFP_KERNEL);
+	if (!base) {
+		b43err(ring->dev->wl, "Failed to allocate or map pages "
+		       "for DMA ringmemory\n");
+		return NULL;
+	}
+	if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+		/* The memory does not fit our device constraints.
+		 * Retry with GFP_DMA set to get lower memory. */
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+						 GFP_KERNEL | GFP_DMA);
+		if (!base) {
+			b43err(ring->dev->wl, "Failed to allocate or map pages "
+			       "in the GFP_DMA region for DMA ringmemory\n");
+			return NULL;
+		}
+		if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+			b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+			b43err(ring->dev->wl, "Failed to allocate DMA "
+			       "ringmemory that fits device constraints\n");
+			return NULL;
+		}
+	}
+	/* We expect the memory to be 4k aligned, at least. */
+	if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		return NULL;
+	}
+
+	return base;
+}
+
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
-	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
-	 * has shown that 4K is sufficient for the latter as long as the buffer
-	 * does not cross an 8K boundary.
-	 *
-	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
-	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
-	 * which accounts for the GFP_DMA flag below.
-	 *
-	 * The flags here must match the flags in free_ringmemory below!
+	unsigned int required;
+	void *base;
+	dma_addr_t dmaaddr;
+
+	/* There are several requirements to the descriptor ring memory:
+	 * - The memory region needs to fit the address constraints for the
+	 *   device (same as for frame buffers).
+	 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
+	 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
 	 */
+
 	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
-	if (!ring->descbase) {
-		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+		required = ring->nr_slots * sizeof(struct b43_dmadesc64);
+	else
+		required = ring->nr_slots * sizeof(struct b43_dmadesc32);
+	if (B43_WARN_ON(required > 0x1000))
+		return -ENOMEM;
+
+	ring->alloc_descsize = 0x1000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
+		/* We're on <=32bit DMA, or we already got 8k aligned memory.
+		 * That's all we need, so we're fine. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
+	}
+	b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
+
+	/* Ok, we failed at the 8k alignment requirement.
+	 * Try to force-align the memory region now. */
+	ring->alloc_descsize = 0x2000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
 		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if (is_8k_aligned(dmaaddr)) {
+		/* We're already 8k aligned. That's OK, too. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
 	}
-	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+	/* Force-align it to 8k */
+	ring->descbase = (void *)((u8 *)base + 0x1000);
+	ring->dmabase = dmaaddr + 0x1000;
+	B43_WARN_ON(!is_8k_aligned(ring->dmabase));
 
 	return 0;
 }
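
Why the +0x1000 bump works: b43_get_and_map_ringmem() above warns unless the mapping is at least 4k aligned, so a 0x2000-byte region that is not already 8k aligned must have a DMA address ending in 0x1000, and adding 0x1000 lands exactly on the next 8k boundary. A stand-alone check of that arithmetic (align_ring_8k() is only an illustration, not driver code):

#include <assert.h>
#include <stdint.h>

static uint64_t align_ring_8k(uint64_t dmaaddr)
{
	assert((dmaaddr & 0x0FFFULL) == 0);	/* at least 4k aligned, as WARNed above */
	if (dmaaddr & 0x1FFFULL)		/* 4k but not 8k aligned... */
		dmaaddr += 0x1000;		/* ...so the next 8k boundary is 0x1000 away */
	assert((dmaaddr & 0x1FFFULL) == 0);
	return dmaaddr;
}

int main(void)
{
	assert(align_ring_8k(0x10002000ULL) == 0x10002000ULL);	/* already 8k aligned */
	assert(align_ring_8k(0x10003000ULL) == 0x10004000ULL);	/* bumped to next 8k */
	return 0;
}
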
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
+				   ring->alloc_dmabase, ring->alloc_descsize);
 }
 
 /* Reset the RX DMA channel */
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
-	switch (ring->type) {
-	case B43_DMA_30BIT:
-		if ((u64)addr + buffersize > (1ULL << 30))
-			goto address_error;
-		break;
-	case B43_DMA_32BIT:
-		if ((u64)addr + buffersize > (1ULL << 32))
-			goto address_error;
-		break;
-	case B43_DMA_64BIT:
-		/* Currently we can't have addresses beyond
-		 * 64bit in the kernel. */
-		break;
+	if (!b43_dma_address_ok(ring, addr, buffersize)) {
+		/* We can't support this address. Unmap it again. */
+		unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+		return 1;
 	}
 
 	/* The address is OK. */
 	return 0;
-
-address_error:
-	/* We can't support this address. Unmap it again. */
-	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-
-	return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	meta->dmaaddr = dmaaddr;
 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
 				   ring->rx_buffersize, 0, 0, 0);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -770,7 +874,7 @@ static void free_all_descbuffers(struct b43_dmaring *ring)
 	for (i = 0; i < ring->nr_slots; i++) {
 		desc = ring->ops->idx2desc(ring, i, &meta);
 
-		if (!meta->skb) {
+		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
 			B43_WARN_ON(!ring->tx);
 			continue;
 		}
@@ -822,7 +926,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 				      enum b43_dmatype type)
 {
 	struct b43_dmaring *ring;
-	int err;
+	int i, err;
 	dma_addr_t dma_test;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -837,6 +941,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			     GFP_KERNEL);
 	if (!ring->meta)
 		goto err_kfree_ring;
+	for (i = 0; i < ring->nr_slots; i++)
+		ring->meta->skb = B43_DMA_PTR_POISON;
 
 	ring->type = type;
 	ring->dev = dev;
@@ -1147,28 +1253,29 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
 	case 0x5000:
 		ring = dma->tx_ring_mcast;
 		break;
-	default:
-		B43_WARN_ON(1);
 	}
 	*slot = (cookie & 0x0FFF);
-	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
+	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
+		b43dbg(dev->wl, "TX-status contains "
+		       "invalid cookie: 0x%04X\n", cookie);
+		return NULL;
+	}
 
 	return ring;
 }
 
 static int dma_tx_fragment(struct b43_dmaring *ring,
-			   struct sk_buff **in_skb)
+			   struct sk_buff *skb)
 {
-	struct sk_buff *skb = *in_skb;
 	const struct b43_dma_ops *ops = ring->ops;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
 	u8 *header;
 	int slot, old_top_slot, old_used_slots;
 	int err;
 	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	struct b43_dmadesc_meta *meta_hdr;
-	struct sk_buff *bounce_skb;
 	u16 cookie;
 	size_t hdrsize = b43_txhdr_size(ring->dev);
 
@@ -1212,34 +1319,28 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta->skb = skb;
 	meta->is_last_fragment = 1;
+	priv_info->bouncebuffer = NULL;
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
-		if (!bounce_skb) {
+		priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+		if (!priv_info->bouncebuffer) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -ENOMEM;
 			goto out_unmap_hdr;
 		}
+		memcpy(priv_info->bouncebuffer, skb->data, skb->len);
 
-		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
-		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
-		bounce_skb->dev = skb->dev;
-		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
-		info = IEEE80211_SKB_CB(bounce_skb);
-
-		dev_kfree_skb_any(skb);
-		skb = bounce_skb;
-		*in_skb = bounce_skb;
-		meta->skb = skb;
-		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
+			kfree(priv_info->bouncebuffer);
+			priv_info->bouncebuffer = NULL;
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;
-			goto out_free_bounce;
+			goto out_unmap_hdr;
 		}
 	}
 
@@ -1253,11 +1354,12 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	}
 	/* Now transfer the whole frame. */
 	wmb();
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 	ops->poke_tx(ring, next_slot(ring, slot));
 	return 0;
 
-out_free_bounce:
-	dev_kfree_skb_any(skb);
 out_unmap_hdr:
 	unmap_descbuffer(ring, meta_hdr->dmaaddr,
 			 hdrsize, 1);
@@ -1362,11 +1464,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 	 * static, so we don't need to store it per frame. */
 	ring->queue_prio = skb_get_queue_mapping(skb);
 
-	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
-	 * into the skb data or cb now. */
-	hdr = NULL;
-	info = NULL;
-	err = dma_tx_fragment(ring, &skb);
+	err = dma_tx_fragment(ring, skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
@@ -1400,30 +1498,63 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	struct b43_dmaring *ring;
 	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
-	int slot;
+	int slot, firstused;
 	bool frame_succeed;
 
 	ring = parse_cookie(dev, status->cookie, &slot);
 	if (unlikely(!ring))
 		return;
-
 	B43_WARN_ON(!ring->tx);
+
+	/* Sanity check: TX packets are processed in-order on one ring.
+	 * Check if the slot deduced from the cookie really is the first
+	 * used slot. */
+	firstused = ring->current_slot - ring->used_slots + 1;
+	if (firstused < 0)
+		firstused = ring->nr_slots + firstused;
+	if (unlikely(slot != firstused)) {
+		/* This is possibly a firmware bug and will result in
+		 * malfunction, memory leaks and/or a stall of DMA functionality. */
+		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+		       "Expected %d, but got %d\n",
+		       ring->index, firstused, slot);
+		return;
+	}
+
 	ops = ring->ops;
 	while (1) {
-		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
+		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
 		desc = ops->idx2desc(ring, slot, &meta);
 
-		if (meta->skb)
-			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
-					 1);
-		else
+		if (b43_dma_ptr_is_poisoned(meta->skb)) {
+			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
+			       "on ring %d\n",
+			       slot, firstused, ring->index);
+			break;
+		}
+		if (meta->skb) {
+			struct b43_private_tx_info *priv_info =
+				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+
+			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+			kfree(priv_info->bouncebuffer);
+			priv_info->bouncebuffer = NULL;
+		} else {
 			unmap_descbuffer(ring, meta->dmaaddr,
 					 b43_txhdr_size(dev), 1);
+		}
 
 		if (meta->is_last_fragment) {
 			struct ieee80211_tx_info *info;
 
-			BUG_ON(!meta->skb);
+			if (unlikely(!meta->skb)) {
+				/* This is the last scatter-gather fragment of a frame,
+				 * so the skb pointer must not be NULL. */
+				b43dbg(dev->wl, "TX status unexpected NULL skb "
+				       "at slot %d (first=%d) on ring %d\n",
+				       slot, firstused, ring->index);
+				break;
+			}
 
 			info = IEEE80211_SKB_CB(meta->skb);
 
@@ -1441,20 +1572,29 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 #endif /* DEBUG */
 			ieee80211_tx_status(dev->wl->hw, meta->skb);
 
-			/* skb is freed by ieee80211_tx_status() */
-			meta->skb = NULL;
+			/* skb will be freed by ieee80211_tx_status().
+			 * Poison our pointer. */
+			meta->skb = B43_DMA_PTR_POISON;
 		} else {
 			/* No need to call free_descriptor_buffer here, as
 			 * this is only the txhdr, which is not allocated.
 			 */
-			B43_WARN_ON(meta->skb);
+			if (unlikely(meta->skb)) {
+				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
+				       "at slot %d (first=%d) on ring %d\n",
+				       slot, firstused, ring->index);
+				break;
+			}
 		}
 
 		/* Everything unmapped and free'd. So it's not used anymore. */
 		ring->used_slots--;
 
-		if (meta->is_last_fragment)
+		if (meta->is_last_fragment) {
+			/* This is the last scatter-gather
+			 * fragment of the frame. We are done. */
 			break;
+		}
 		slot = next_slot(ring, slot);
 	}
 	if (ring->stopped) {
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index f0b0838fb5ba..e607b392314c 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -1,7 +1,7 @@
 #ifndef B43_DMA_H_
 #define B43_DMA_H_
 
-#include <linux/ieee80211.h>
+#include <linux/err.h>
 
 #include "b43.h"
 
@@ -157,7 +157,6 @@ struct b43_dmadesc_generic {
 } __attribute__ ((__packed__));
 
 /* Misc DMA constants */
-#define B43_DMA_RINGMEMSIZE		PAGE_SIZE
 #define B43_DMA0_RX_FRAMEOFFSET		30
 
 /* DMA engine tuning knobs */
@@ -165,6 +164,10 @@ struct b43_dmadesc_generic {
 #define B43_RXRING_SLOTS		64
 #define B43_DMA0_RX_BUFFERSIZE		IEEE80211_MAX_FRAME_LEN
 
+/* Pointer poison */
+#define B43_DMA_PTR_POISON		((void *)ERR_PTR(-ENOMEM))
+#define b43_dma_ptr_is_poisoned(ptr)	(unlikely((ptr) == B43_DMA_PTR_POISON))
+
 
 struct sk_buff;
 struct b43_private;
@@ -243,6 +246,12 @@ struct b43_dmaring {
 	/* The QOS priority assigned to this ring. Only used for TX rings.
 	 * This is the mac80211 "queue" value. */
 	u8 queue_prio;
+	/* Pointers and size of the originally allocated and mapped memory
+	 * region for the descriptor ring. */
+	void *alloc_descbase;
+	dma_addr_t alloc_dmabase;
+	unsigned int alloc_descsize;
+	/* Pointer to our wireless device. */
 	struct b43_wldev *dev;
 #ifdef CONFIG_B43_DEBUG
 	/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 1e8dba488004..c587115dd2b9 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -246,6 +246,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
 			*behaviour = B43_LED_OFF;
 			break;
 		default:
+			*behaviour = B43_LED_OFF;
 			B43_WARN_ON(1);
 			return;
 		}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 098dda1a67c1..077480c4916a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3573,7 +3573,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
 	if (conf->channel->hw_value != phy->channel)
 		b43_switch_channel(dev, conf->channel->hw_value);
 
-	dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
+	dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
 
 	/* Adjust the desired TX power level. */
 	if (conf->power_level != 0) {
@@ -4669,7 +4669,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
 {
 	struct b43_wl *wl = dev->wl;
 	struct ssb_bus *bus = dev->dev->bus;
-	struct pci_dev *pdev = bus->host_pci;
+	struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
 	int err;
 	bool have_2ghz_phy = 0, have_5ghz_phy = 0;
 	u32 tmp;
@@ -4802,7 +4802,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
 
 	if (!list_empty(&wl->devlist)) {
 		/* We are not the first core on this chip. */
-		pdev = dev->bus->host_pci;
+		pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
 		/* Only special chips support more than one wireless
 		 * core, although some of the other chips have more than
 		 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 1e318d815a5b..3e046ec1ff86 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -67,6 +67,7 @@ static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
 	struct b43_phy_lp *lpphy = phy->lp;
 
 	memset(lpphy, 0, sizeof(*lpphy));
+	lpphy->antenna = B43_ANTENNA_DEFAULT;
 
 	//TODO
 }
@@ -751,11 +752,17 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
 	}
 }
 
+static void lpphy_set_trsw_over(struct b43_wldev *dev, bool tx, bool rx)
+{
+	u16 trsw = (tx << 1) | rx;
+	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, trsw);
+	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
+}
+
 static void lpphy_disable_crs(struct b43_wldev *dev, bool user)
 {
 	lpphy_set_deaf(dev, user);
-	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, 0x1);
-	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
+	lpphy_set_trsw_over(dev, false, true);
 	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB);
 	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4);
 	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7);
@@ -790,6 +797,60 @@ static void lpphy_restore_crs(struct b43_wldev *dev, bool user)
 
 struct lpphy_tx_gains { u16 gm, pga, pad, dac; };
 
+static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
+{
+	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
+	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
+	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
+	if (dev->phy.rev >= 2) {
+		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
+			b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
+		}
+	} else {
+		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
+	}
+}
+
+static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
+{
+	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
+	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
+	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
+	if (dev->phy.rev >= 2) {
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
+			b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
+		}
+	} else {
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
+	}
+}
+
+static void lpphy_disable_tx_gain_override(struct b43_wldev *dev)
+{
+	if (dev->phy.rev < 2)
+		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
+	else {
+		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F);
+		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF);
+	}
+	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF);
+}
+
+static void lpphy_enable_tx_gain_override(struct b43_wldev *dev)
+{
+	if (dev->phy.rev < 2)
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
+	else {
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x80);
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x4000);
+	}
+	b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x40);
+}
+
 static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev)
 {
 	struct lpphy_tx_gains gains;
@@ -819,6 +880,17 @@ static void lpphy_set_dac_gain(struct b43_wldev *dev, u16 dac)
 	b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl);
 }
 
+static u16 lpphy_get_pa_gain(struct b43_wldev *dev)
+{
+	return b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x7F;
+}
+
+static void lpphy_set_pa_gain(struct b43_wldev *dev, u16 gain)
+{
+	b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0xE03F, gain << 6);
+	b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x80FF, gain << 8);
+}
+
 static void lpphy_set_tx_gains(struct b43_wldev *dev,
 			       struct lpphy_tx_gains gains)
 {
@@ -829,25 +901,22 @@ static void lpphy_set_tx_gains(struct b43_wldev *dev,
 		b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
 				0xF800, rf_gain);
 	} else {
-		pa_gain = b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x1FC0;
-		pa_gain <<= 2;
+		pa_gain = lpphy_get_pa_gain(dev);
 		b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
 			      (gains.pga << 8) | gains.gm);
+		/*
+		 * SPEC FIXME The spec calls for (pa_gain << 8) here, but that
+		 * conflicts with the spec for set_pa_gain! Vendor driver bug?
+		 */
 		b43_phy_maskset(dev, B43_PHY_OFDM(0xFB),
-				0x8000, gains.pad | pa_gain);
+				0x8000, gains.pad | (pa_gain << 6));
 		b43_phy_write(dev, B43_PHY_OFDM(0xFC),
 			      (gains.pga << 8) | gains.gm);
 		b43_phy_maskset(dev, B43_PHY_OFDM(0xFD),
-				0x8000, gains.pad | pa_gain);
+				0x8000, gains.pad | (pa_gain << 8));
 	}
 	lpphy_set_dac_gain(dev, gains.dac);
-	if (dev->phy.rev < 2) {
-		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF, 1 << 8);
-	} else {
-		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F, 1 << 7);
-		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF, 1 << 14);
-	}
-	b43_phy_maskset(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF, 1 << 6);
+	lpphy_enable_tx_gain_override(dev);
 }
 
 static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain)
@@ -887,38 +956,6 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
 	}
 }
 
-static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
-{
-	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
-	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
-	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
-	if (dev->phy.rev >= 2) {
-		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
-			b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
-		}
-	} else {
-		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
-	}
-}
-
-static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
-{
-	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
-	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
-	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
-	if (dev->phy.rev >= 2) {
-		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
-			b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
-		}
-	} else {
-		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
-	}
-}
-
 static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain)
 {
 	if (dev->phy.rev < 2)
@@ -1003,8 +1040,7 @@ static int lpphy_loopback(struct b43_wldev *dev)
 
 	memset(&iq_est, 0, sizeof(iq_est));
 
-	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, 0x3);
-	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
+	lpphy_set_trsw_over(dev, true, true);
 	b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1);
 	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
 	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
@@ -1126,7 +1162,7 @@ static void lpphy_set_tx_power_control(struct b43_wldev *dev,
 			b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM,
 					0x8FFF, ((u16)lpphy->tssi_npt << 16));
 			//TODO Set "TSSI Transmit Count" variable to total transmitted frame count
-			//TODO Disable TX gain override
+			lpphy_disable_tx_gain_override(dev);
 			lpphy->tx_pwr_idx_over = -1;
 		}
 	}
@@ -1312,15 +1348,73 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
 	}
 }
 
+static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
+{
+	if (dev->phy.rev >= 2)
+		return; // rev2+ doesn't support antenna diversity
+
+	if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
+		return;
+
+	b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);
+
+	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
+	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
+
+	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);
+
+	dev->phy.lp->antenna = antenna;
+}
+
+static void lpphy_set_tx_iqcc(struct b43_wldev *dev, u16 a, u16 b)
+{
+	u16 tmp[2];
+
+	tmp[0] = a;
+	tmp[1] = b;
+	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 80), 2, tmp);
+}
+
 static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
+	struct lpphy_tx_gains gains;
+	u32 iq_comp, tx_gain, coeff, rf_power;
 
 	lpphy->tx_pwr_idx_over = index;
+	lpphy_read_tx_pctl_mode_from_hardware(dev);
 	if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF)
 		lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW);
-
-	//TODO
+	if (dev->phy.rev >= 2) {
+		iq_comp = b43_lptab_read(dev, B43_LPTAB32(7, index + 320));
+		tx_gain = b43_lptab_read(dev, B43_LPTAB32(7, index + 192));
+		gains.pad = (tx_gain >> 16) & 0xFF;
+		gains.gm = tx_gain & 0xFF;
+		gains.pga = (tx_gain >> 8) & 0xFF;
+		gains.dac = (iq_comp >> 28) & 0xFF;
+		lpphy_set_tx_gains(dev, gains);
+	} else {
+		iq_comp = b43_lptab_read(dev, B43_LPTAB32(10, index + 320));
+		tx_gain = b43_lptab_read(dev, B43_LPTAB32(10, index + 192));
+		b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
+				0xF800, (tx_gain >> 4) & 0x7FFF);
+		lpphy_set_dac_gain(dev, tx_gain & 0x7);
+		lpphy_set_pa_gain(dev, (tx_gain >> 24) & 0x7F);
+	}
+	lpphy_set_bb_mult(dev, (iq_comp >> 20) & 0xFF);
+	lpphy_set_tx_iqcc(dev, (iq_comp >> 10) & 0x3FF, iq_comp & 0x3FF);
+	if (dev->phy.rev >= 2) {
+		coeff = b43_lptab_read(dev, B43_LPTAB32(7, index + 448));
+	} else {
+		coeff = b43_lptab_read(dev, B43_LPTAB32(10, index + 448));
+	}
+	b43_lptab_write(dev, B43_LPTAB16(0, 85), coeff & 0xFFFF);
+	if (dev->phy.rev >= 2) {
+		rf_power = b43_lptab_read(dev, B43_LPTAB32(7, index + 576));
+		b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00,
+				rf_power & 0xFFFF);//SPEC FIXME mask & set != 0
+	}
+	lpphy_enable_tx_gain_override(dev);
 }
 
 static void lpphy_btcoex_override(struct b43_wldev *dev)
@@ -1329,58 +1423,45 @@ static void lpphy_btcoex_override(struct b43_wldev *dev)
 	b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF);
 }
 
-static void lpphy_pr41573_workaround(struct b43_wldev *dev)
+static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
+					 bool blocked)
 {
-	struct b43_phy_lp *lpphy = dev->phy.lp;
-	u32 *saved_tab;
-	const unsigned int saved_tab_size = 256;
-	enum b43_lpphy_txpctl_mode txpctl_mode;
-	s8 tx_pwr_idx_over;
-	u16 tssi_npt, tssi_idx;
-
-	saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
-	if (!saved_tab) {
-		b43err(dev->wl, "PR41573 failed. Out of memory!\n");
-		return;
-	}
-
-	lpphy_read_tx_pctl_mode_from_hardware(dev);
-	txpctl_mode = lpphy->txpctl_mode;
-	tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
-	tssi_npt = lpphy->tssi_npt;
-	tssi_idx = lpphy->tssi_idx;
-
-	if (dev->phy.rev < 2) {
-		b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
-				    saved_tab_size, saved_tab);
+	//TODO check MAC control register
+	if (blocked) {
+		if (dev->phy.rev >= 2) {
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x83FF);
+			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
+			b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0x80FF);
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xDFFF);
+			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0808);
+		} else {
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xE0FF);
+			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFCFF);
+			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0018);
+		}
 	} else {
-		b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
-				    saved_tab_size, saved_tab);
+		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xE0FF);
+		if (dev->phy.rev >= 2)
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xF7F7);
+		else
+			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFFE7);
 	}
-	//TODO
-
-	kfree(saved_tab);
 }
 
-static void lpphy_calibration(struct b43_wldev *dev)
+/* This was previously called lpphy_japan_filter */
+static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	enum b43_lpphy_txpctl_mode saved_pctl_mode;
-
-	b43_mac_suspend(dev);
-
-	lpphy_btcoex_override(dev);
-	lpphy_read_tx_pctl_mode_from_hardware(dev);
-	saved_pctl_mode = lpphy->txpctl_mode;
-	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
-	//TODO Perform transmit power table I/Q LO calibration
-	if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
-		lpphy_pr41573_workaround(dev);
-	//TODO If a full calibration has not been performed on this channel yet, perform PAPD TX-power calibration
-	lpphy_set_tx_power_control(dev, saved_pctl_mode);
-	//TODO Perform I/Q calibration with a single control value set
+	u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter!
 
-	b43_mac_enable(dev);
+	if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific?
+		b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9);
+		if ((dev->phy.rev == 1) && (lpphy->rc_cap))
+			lpphy_set_rc_cap(dev);
+	} else {
+		b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
+	}
 }
 
 static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode)
@@ -1489,6 +1570,473 @@ static void lpphy_tx_pctl_init(struct b43_wldev *dev)
 	}
 }
 
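+/*
+ * PR41573 workaround: save 256 PHY table entries (table 7 or 10, offset
+ * 0x140, depending on the PHY revision), fully re-initialize the PHY,
+ * then restore the saved entries and the previous channel, TSSI, antenna
+ * and TX power control state.
+ */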
+static void lpphy_pr41573_workaround(struct b43_wldev *dev)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+	u32 *saved_tab;
+	const unsigned int saved_tab_size = 256;
+	enum b43_lpphy_txpctl_mode txpctl_mode;
+	s8 tx_pwr_idx_over;
+	u16 tssi_npt, tssi_idx;
+
+	saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
+	if (!saved_tab) {
+		b43err(dev->wl, "PR41573 failed. Out of memory!\n");
+		return;
+	}
+
+	lpphy_read_tx_pctl_mode_from_hardware(dev);
+	txpctl_mode = lpphy->txpctl_mode;
+	tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
+	tssi_npt = lpphy->tssi_npt;
+	tssi_idx = lpphy->tssi_idx;
+
+	if (dev->phy.rev < 2) {
+		b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
+				    saved_tab_size, saved_tab);
+	} else {
+		b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
+				    saved_tab_size, saved_tab);
+	}
+	//FIXME PHY reset
+	lpphy_table_init(dev); //FIXME is table init needed?
+	lpphy_baseband_init(dev);
+	lpphy_tx_pctl_init(dev);
+	b43_lpphy_op_software_rfkill(dev, false);
+	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
+	if (dev->phy.rev < 2) {
+		b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0x140),
+				     saved_tab_size, saved_tab);
+	} else {
+		b43_lptab_write_bulk(dev, B43_LPTAB32(7, 0x140),
+				     saved_tab_size, saved_tab);
+	}
+	b43_write16(dev, B43_MMIO_CHANNEL, lpphy->channel);
+	lpphy->tssi_npt = tssi_npt;
+	lpphy->tssi_idx = tssi_idx;
+	lpphy_set_analog_filter(dev, lpphy->channel);
+	if (tx_pwr_idx_over != -1)
+		lpphy_set_tx_power_by_index(dev, tx_pwr_idx_over);
+	if (lpphy->rc_cap)
+		lpphy_set_rc_cap(dev);
+	b43_lpphy_op_set_rx_antenna(dev, lpphy->antenna);
+	lpphy_set_tx_power_control(dev, txpctl_mode);
+	kfree(saved_tab);
+}
+
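+/*
+ * Per-channel RX I/Q compensation coefficients. The table to use is
+ * selected by chip (BCM5354) or PHY revision in lpphy_rx_iq_cal() below.
+ */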
+struct lpphy_rx_iq_comp { u8 chan; s8 c1, c0; };
+
+static const struct lpphy_rx_iq_comp lpphy_5354_iq_table[] = {
+	{ .chan = 1, .c1 = -66, .c0 = 15, },
+	{ .chan = 2, .c1 = -66, .c0 = 15, },
+	{ .chan = 3, .c1 = -66, .c0 = 15, },
+	{ .chan = 4, .c1 = -66, .c0 = 15, },
+	{ .chan = 5, .c1 = -66, .c0 = 15, },
+	{ .chan = 6, .c1 = -66, .c0 = 15, },
+	{ .chan = 7, .c1 = -66, .c0 = 14, },
+	{ .chan = 8, .c1 = -66, .c0 = 14, },
+	{ .chan = 9, .c1 = -66, .c0 = 14, },
+	{ .chan = 10, .c1 = -66, .c0 = 14, },
+	{ .chan = 11, .c1 = -66, .c0 = 14, },
+	{ .chan = 12, .c1 = -66, .c0 = 13, },
+	{ .chan = 13, .c1 = -66, .c0 = 13, },
+	{ .chan = 14, .c1 = -66, .c0 = 13, },
+};
+
+static const struct lpphy_rx_iq_comp lpphy_rev0_1_iq_table[] = {
+	{ .chan = 1, .c1 = -64, .c0 = 13, },
+	{ .chan = 2, .c1 = -64, .c0 = 13, },
+	{ .chan = 3, .c1 = -64, .c0 = 13, },
+	{ .chan = 4, .c1 = -64, .c0 = 13, },
+	{ .chan = 5, .c1 = -64, .c0 = 12, },
+	{ .chan = 6, .c1 = -64, .c0 = 12, },
+	{ .chan = 7, .c1 = -64, .c0 = 12, },
+	{ .chan = 8, .c1 = -64, .c0 = 12, },
+	{ .chan = 9, .c1 = -64, .c0 = 12, },
+	{ .chan = 10, .c1 = -64, .c0 = 11, },
+	{ .chan = 11, .c1 = -64, .c0 = 11, },
+	{ .chan = 12, .c1 = -64, .c0 = 11, },
+	{ .chan = 13, .c1 = -64, .c0 = 11, },
+	{ .chan = 14, .c1 = -64, .c0 = 10, },
+	{ .chan = 34, .c1 = -62, .c0 = 24, },
+	{ .chan = 38, .c1 = -62, .c0 = 24, },
+	{ .chan = 42, .c1 = -62, .c0 = 24, },
+	{ .chan = 46, .c1 = -62, .c0 = 23, },
+	{ .chan = 36, .c1 = -62, .c0 = 24, },
+	{ .chan = 40, .c1 = -62, .c0 = 24, },
+	{ .chan = 44, .c1 = -62, .c0 = 23, },
+	{ .chan = 48, .c1 = -62, .c0 = 23, },
+	{ .chan = 52, .c1 = -62, .c0 = 23, },
+	{ .chan = 56, .c1 = -62, .c0 = 22, },
+	{ .chan = 60, .c1 = -62, .c0 = 22, },
+	{ .chan = 64, .c1 = -62, .c0 = 22, },
+	{ .chan = 100, .c1 = -62, .c0 = 16, },
+	{ .chan = 104, .c1 = -62, .c0 = 16, },
+	{ .chan = 108, .c1 = -62, .c0 = 15, },
+	{ .chan = 112, .c1 = -62, .c0 = 14, },
+	{ .chan = 116, .c1 = -62, .c0 = 14, },
+	{ .chan = 120, .c1 = -62, .c0 = 13, },
+	{ .chan = 124, .c1 = -62, .c0 = 12, },
+	{ .chan = 128, .c1 = -62, .c0 = 12, },
+	{ .chan = 132, .c1 = -62, .c0 = 12, },
+	{ .chan = 136, .c1 = -62, .c0 = 11, },
+	{ .chan = 140, .c1 = -62, .c0 = 10, },
+	{ .chan = 149, .c1 = -61, .c0 = 9, },
+	{ .chan = 153, .c1 = -61, .c0 = 9, },
+	{ .chan = 157, .c1 = -61, .c0 = 9, },
+	{ .chan = 161, .c1 = -61, .c0 = 8, },
+	{ .chan = 165, .c1 = -61, .c0 = 8, },
+	{ .chan = 184, .c1 = -62, .c0 = 25, },
+	{ .chan = 188, .c1 = -62, .c0 = 25, },
+	{ .chan = 192, .c1 = -62, .c0 = 25, },
+	{ .chan = 196, .c1 = -62, .c0 = 25, },
+	{ .chan = 200, .c1 = -62, .c0 = 25, },
+	{ .chan = 204, .c1 = -62, .c0 = 25, },
+	{ .chan = 208, .c1 = -62, .c0 = 25, },
+	{ .chan = 212, .c1 = -62, .c0 = 25, },
+	{ .chan = 216, .c1 = -62, .c0 = 26, },
+};
+
+static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
+	.chan = 0,
+	.c1 = -64,
+	.c0 = 0,
+};
+
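+/*
+ * Number of bits needed to represent abs(val), i.e. the position of the
+ * highest set bit plus one; e.g. lpphy_nbits(20) == 5 (20 == 0b10100).
+ * Used below to normalize the I/Q estimates for the fixed point math.
+ */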
+static u8 lpphy_nbits(s32 val)
+{
+	u32 tmp = abs(val);
+	u8 nbits = 0;
+
+	while (tmp != 0) {
+		nbits++;
+		tmp >>= 1;
+	}
+
+	return nbits;
+}
+
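+/*
+ * Estimate the RX I/Q imbalance over the given number of samples and
+ * program new compensation coefficients into RX_COMP_COEFF_S.
+ * Returns zero if the estimate failed.
+ */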
+static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
+{
+	struct lpphy_iq_est iq_est;
+	u16 c0, c1;
+	int prod, ipwr, qpwr, prod_msb, q_msb, tmp1, tmp2, tmp3, tmp4, ret;
+
+	c1 = b43_phy_read(dev, B43_LPPHY_RX_COMP_COEFF_S);
+	c0 = c1 >> 8;
+	c1 |= 0xFF;
+
+	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, 0x00C0);
+	b43_phy_mask(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF);
+
+	ret = lpphy_rx_iq_est(dev, samples, 32, &iq_est);
+	if (!ret)
+		goto out;
+
+	prod = iq_est.iq_prod;
+	ipwr = iq_est.i_pwr;
+	qpwr = iq_est.q_pwr;
+
+	if (ipwr + qpwr < 2) {
+		ret = 0;
+		goto out;
+	}
+
+	prod_msb = lpphy_nbits(prod);
+	q_msb = lpphy_nbits(qpwr);
+	tmp1 = prod_msb - 20;
+
+	if (tmp1 >= 0) {
+		tmp3 = ((prod << (30 - prod_msb)) + (ipwr >> (1 + tmp1))) /
+			(ipwr >> tmp1);
+	} else {
+		tmp3 = ((prod << (30 - prod_msb)) + (ipwr << (-1 - tmp1))) /
+			(ipwr << -tmp1);
+	}
+
+	tmp2 = q_msb - 11;
+
+	if (tmp2 >= 0)
+		tmp4 = (qpwr << (31 - q_msb)) / (ipwr >> tmp2);
+	else
+		tmp4 = (qpwr << (31 - q_msb)) / (ipwr << -tmp2);
+
+	tmp4 -= tmp3 * tmp3;
+	tmp4 = -int_sqrt(tmp4);
+
+	c0 = tmp3 >> 3;
+	c1 = tmp4 >> 4;
+
+out:
+	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, c1);
+	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, c0 << 8);
+	return ret;
+}
+
+/* Complex number using 2 32-bit signed integers */
+typedef struct {s32 i, q;} lpphy_c32;
+
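+/*
+ * Fixed point CORDIC rotation. theta is in degrees (clamped to +-180);
+ * the internal angle accumulator and the arctg[] table are in 1/65536
+ * degree units (arctan(1) = 45 deg = 2949120). The start magnitude 39797
+ * is roughly 65536 divided by the CORDIC gain (~1.6468), so the result
+ * is approximately 65536 * (cos(theta), sin(theta)).
+ */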
+static lpphy_c32 lpphy_cordic(int theta)
+{
+	u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
+		      58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
+		      229, 115, 57, 29, };
+	int i, tmp, signx = 1, angle = 0;
+	lpphy_c32 ret = { .i = 39797, .q = 0, };
+
+	theta = clamp_t(int, theta, -180, 180);
+
+	if (theta > 90) {
+		theta -= 180;
+		signx = -1;
+	} else if (theta < -90) {
+		theta += 180;
+		signx = -1;
+	}
+
+	for (i = 0; i <= 17; i++) {
+		if (theta > angle) {
+			tmp = ret.i - (ret.q >> i);
+			ret.q += ret.i >> i;
+			ret.i = tmp;
+			angle += arctg[i];
+		} else {
+			tmp = ret.i + (ret.q >> i);
+			ret.q -= ret.i >> i;
+			ret.i = tmp;
+			angle -= arctg[i];
+		}
+	}
+
+	ret.i *= signx;
+	ret.q *= signx;
+
+	return ret;
+}
+
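+/*
+ * Start playback of the sample-play buffer: program the buffer length,
+ * the loop count (0xFFFF is passed through undecremented and is used by
+ * lpphy_start_tx_tone() for continuous playback) and the wait value,
+ * then trigger playback via B43_LPPHY_A_PHY_CTL_ADDR bit 0.
+ */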
+static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
+			      u16 wait)
+{
+	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL,
+			0xFFC0, samples - 1);
+	if (loops != 0xFFFF)
+		loops--;
+	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000, loops);
+	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, 0x3F, wait << 6);
+	b43_phy_set(dev, B43_LPPHY_A_PHY_CTL_ADDR, 0x1);
+}
+
+//SPEC FIXME what does a negative freq mean?
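+/*
+ * Load a complex tone of the given frequency into the sample-play buffer
+ * and start continuous playback. The constants suggest freq is in kHz at
+ * a 20 MHz sample clock: the phase step per sample is 360 * freq / 20000
+ * = (9 * freq) / 500 degrees, and the buffer length is chosen so that a
+ * whole number of periods fits. Each entry packs I in the high byte and
+ * Q in the low byte.
+ */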
+static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+	u16 buf[64];
+	int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
+	lpphy_c32 sample;
+
+	lpphy->tx_tone_freq = freq;
+
+	if (freq) {
+		/* Find i for which abs(freq) integrally divides 20000 * i */
+		for (i = 1; samples * abs(freq) != 20000 * i; i++) {
+			samples = (20000 * i) / abs(freq);
+			if (B43_WARN_ON(samples > 63))
+				return;
+		}
+	} else {
+		samples = 2;
+	}
+
+	for (i = 0; i < samples; i++) {
+		sample = lpphy_cordic(angle);
+		angle += rotation;
+		buf[i] = ((sample.i * max) & 0xFF) << 8;
+		buf[i] |= (sample.q * max) & 0xFF;
+	}
+
+	b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
+
+	lpphy_run_samples(dev, samples, 0xFFFF, 0);
+}
+
+static void lpphy_stop_tx_tone(struct b43_wldev *dev)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+	int i;
+
+	lpphy->tx_tone_freq = 0;
+
+	b43_phy_mask(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000);
+	for (i = 0; i < 31; i++) {
+		if (!(b43_phy_read(dev, B43_LPPHY_A_PHY_CTL_ADDR) & 0x1))
+			break;
+		udelay(100);
+	}
+}
+
+static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
+			   int mode, bool useindex, u8 index)
+{
+	//TODO
+}
+
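+/*
+ * Save the TX power control mode, gain and AFE override state, run the
+ * (not yet implemented) PAPD calibration, then restore the saved state.
+ */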
+static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+	struct ssb_bus *bus = dev->dev->bus;
+	struct lpphy_tx_gains gains, oldgains;
+	int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
+
+	lpphy_read_tx_pctl_mode_from_hardware(dev);
+	old_txpctl = lpphy->txpctl_mode;
+	old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
+	if (old_afe_ovr)
+		oldgains = lpphy_get_tx_gains(dev);
+	old_rf = b43_phy_read(dev, B43_LPPHY_RF_PWR_OVERRIDE) & 0xFF;
+	old_bbmult = lpphy_get_bb_mult(dev);
+
+	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
+
+	if (bus->chip_id == 0x4325 && bus->chip_rev == 0)
+		lpphy_papd_cal(dev, gains, 0, 1, 30);
+	else
+		lpphy_papd_cal(dev, gains, 0, 1, 65);
+
+	if (old_afe_ovr)
+		lpphy_set_tx_gains(dev, oldgains);
+	lpphy_set_bb_mult(dev, old_bbmult);
+	lpphy_set_tx_power_control(dev, old_txpctl);
+	b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, old_rf);
+}
+
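+/*
+ * RX I/Q calibration: program the per-channel compensation coefficients,
+ * override the TR switch and PA as requested, then derive the final
+ * coefficients from either a noise measurement or a looped-back TX tone.
+ */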
+static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
+			    bool rx, bool pa, struct lpphy_tx_gains *gains)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+	struct ssb_bus *bus = dev->dev->bus;
+	const struct lpphy_rx_iq_comp *iqcomp = NULL;
+	struct lpphy_tx_gains nogains, oldgains;
+	u16 tmp;
+	int i, ret;
+
+	memset(&nogains, 0, sizeof(nogains));
+	memset(&oldgains, 0, sizeof(oldgains));
+
+	if (bus->chip_id == 0x5354) {
+		for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) {
+			if (lpphy_5354_iq_table[i].chan == lpphy->channel) {
+				iqcomp = &lpphy_5354_iq_table[i];
+			}
+		}
+	} else if (dev->phy.rev >= 2) {
+		iqcomp = &lpphy_rev2plus_iq_comp;
+	} else {
+		for (i = 0; i < ARRAY_SIZE(lpphy_rev0_1_iq_table); i++) {
+			if (lpphy_rev0_1_iq_table[i].chan == lpphy->channel) {
+				iqcomp = &lpphy_rev0_1_iq_table[i];
+			}
+		}
+	}
+
+	if (B43_WARN_ON(!iqcomp))
+		return 0;
+
+	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, iqcomp->c1);
+	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S,
+			0x00FF, iqcomp->c0 << 8);
+
+	if (noise) {
+		tx = true;
+		rx = false;
+		pa = false;
+	}
+
+	lpphy_set_trsw_over(dev, tx, rx);
+
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
+		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
+				0xFFF7, pa << 3);
+	} else {
+		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20);
+		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
+				0xFFDF, pa << 5);
+	}
+
+	tmp = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
+
+	if (noise)
+		lpphy_set_rx_gain(dev, 0x2D5D);
+	else {
+		if (tmp)
+			oldgains = lpphy_get_tx_gains(dev);
+		if (!gains)
+			gains = &nogains;
+		lpphy_set_tx_gains(dev, *gains);
+	}
+
+	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
+	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
+	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
+	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800);
+	lpphy_set_deaf(dev, false);
+	if (noise)
+		ret = lpphy_calc_rx_iq_comp(dev, 0xFFF0);
+	else {
+		lpphy_start_tx_tone(dev, 4000, 100);
+		ret = lpphy_calc_rx_iq_comp(dev, 0x4000);
+		lpphy_stop_tx_tone(dev);
+	}
+	lpphy_clear_deaf(dev, false);
+	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFC);
+	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFF7);
+	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFDF);
+	if (!noise) {
+		if (tmp)
+			lpphy_set_tx_gains(dev, oldgains);
+		else
+			lpphy_disable_tx_gain_override(dev);
+	}
+	lpphy_disable_rx_gain_override(dev);
+	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
+	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xF7FF);
+	return ret;
+}
+
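+/*
+ * Periodic calibration, run as the 60 second periodic work (see the
+ * b43_phyops_lp hookup below): applies the PR41573 workaround on rev 0
+ * (when TX power control is enabled), runs the PAPD TX power calibration
+ * once per channel on rev 2+ and finishes with an RX I/Q noise
+ * calibration.
+ */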
+static void lpphy_calibration(struct b43_wldev *dev)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+	enum b43_lpphy_txpctl_mode saved_pctl_mode;
+	bool full_cal = false;
+
+	if (lpphy->full_calib_chan != lpphy->channel) {
+		full_cal = true;
+		lpphy->full_calib_chan = lpphy->channel;
+	}
+
+	b43_mac_suspend(dev);
+
+	lpphy_btcoex_override(dev);
+	if (dev->phy.rev >= 2)
+		lpphy_save_dig_flt_state(dev);
+	lpphy_read_tx_pctl_mode_from_hardware(dev);
+	saved_pctl_mode = lpphy->txpctl_mode;
+	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
+	//TODO Perform transmit power table I/Q LO calibration
+	if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
+		lpphy_pr41573_workaround(dev);
+	if ((dev->phy.rev >= 2) && full_cal) {
+		lpphy_papd_cal_txpwr(dev);
+	}
+	lpphy_set_tx_power_control(dev, saved_pctl_mode);
+	if (dev->phy.rev >= 2)
+		lpphy_restore_dig_flt_state(dev);
+	lpphy_rx_iq_cal(dev, true, true, false, false, NULL);
+
+	b43_mac_enable(dev);
+}
+
 static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg)
 {
 	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
@@ -1533,12 +2081,6 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
 	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
 }
 
-static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
-					 bool blocked)
-{
-	//TODO
-}
-
 struct b206x_channel {
 	u8 channel;
 	u16 freq;
@@ -2004,22 +2546,6 @@ static int lpphy_b2062_tune(struct b43_wldev *dev,
 	return err;
 }
 
-
-/* This was previously called lpphy_japan_filter */
-static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
-{
-	struct b43_phy_lp *lpphy = dev->phy.lp;
-	u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter!
-
-	if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific?
-		b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9);
-		if ((dev->phy.rev == 1) && (lpphy->rc_cap))
-			lpphy_set_rc_cap(dev);
-	} else {
-		b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
-	}
-}
-
 static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
 {
 	u16 tmp;
@@ -2204,18 +2730,6 @@ static int b43_lpphy_op_init(struct b43_wldev *dev)
 	return 0;
 }
 
-static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
-{
-	if (dev->phy.rev >= 2)
-		return; // rev2+ doesn't support antenna diversity
-
-	if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
-		return;
-
-	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
-	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
-}
-
 static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
 {
 	//TODO
@@ -2238,6 +2752,11 @@ void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
        }
 }
 
+static void b43_lpphy_op_pwork_15sec(struct b43_wldev *dev)
+{
+	//TODO
+}
+
 const struct b43_phy_operations b43_phyops_lp = {
 	.allocate		= b43_lpphy_op_allocate,
 	.free			= b43_lpphy_op_free,
@@ -2255,4 +2774,6 @@ const struct b43_phy_operations b43_phyops_lp = {
 	.set_rx_antenna		= b43_lpphy_op_set_rx_antenna,
 	.recalc_txpower		= b43_lpphy_op_recalc_txpower,
 	.adjust_txpower		= b43_lpphy_op_adjust_txpower,
+	.pwork_15sec		= b43_lpphy_op_pwork_15sec,
+	.pwork_60sec		= lpphy_calibration,
 };
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
index c3232c17b60a..62737f700cbc 100644
--- a/drivers/net/wireless/b43/phy_lp.h
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -286,6 +286,7 @@
 #define B43_LPPHY_TR_LOOKUP_6			B43_PHY_OFDM(0xC8) /* TR Lookup 6 */
 #define B43_LPPHY_TR_LOOKUP_7			B43_PHY_OFDM(0xC9) /* TR Lookup 7 */
 #define B43_LPPHY_TR_LOOKUP_8			B43_PHY_OFDM(0xCA) /* TR Lookup 8 */
+#define B43_LPPHY_RF_PWR_OVERRIDE		B43_PHY_OFDM(0xD3) /* RF power override */
 
 
 
@@ -871,12 +872,12 @@ struct b43_phy_lp {
 	u8 rssi_gs;
 
 	/* RC cap */
-	u8 rc_cap; /* FIXME initial value? */
+	u8 rc_cap;
 	/* BX arch */
 	u8 bx_arch;
 
 	/* Full calibration channel */
-	u8 full_calib_chan; /* FIXME initial value? */
+	u8 full_calib_chan;
 
 	/* Transmit iqlocal best coeffs */
 	bool tx_iqloc_best_coeffs_valid;
@@ -891,6 +892,12 @@ struct b43_phy_lp {
 
 	/* The channel we are tuned to */
 	u8 channel;
+
+	/* The active antenna diversity mode */
+	int antenna;
+
+	/* Frequency of the active TX tone */
+	int tx_tone_freq;
 };
 
 enum tssi_mux_mode {
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 9b9044400218..c01b8e02412f 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -342,12 +342,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
 			q->mmio_base + B43_PIO_TXDATA,
 			sizeof(u16));
 	if (data_len & 1) {
+		u8 *tail = wl->pio_tailspace;
+		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
+
 		/* Write the last byte. */
 		ctl &= ~B43_PIO_TXCTL_WRITEHI;
 		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
-		wl->tx_tail[0] = data[data_len - 1];
-		wl->tx_tail[1] = 0;
-		ssb_block_write(dev->dev, wl->tx_tail, 2,
+		tail[0] = data[data_len - 1];
+		tail[1] = 0;
+		ssb_block_write(dev->dev, tail, 2,
 				q->mmio_base + B43_PIO_TXDATA,
 				sizeof(u16));
 	}
@@ -393,31 +396,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
 			q->mmio_base + B43_PIO8_TXDATA,
 			sizeof(u32));
 	if (data_len & 3) {
-		wl->tx_tail[3] = 0;
+		u8 *tail = wl->pio_tailspace;
+		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
+
+		memset(tail, 0, 4);
 		/* Write the last few bytes. */
 		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
 			 B43_PIO8_TXCTL_24_31);
 		switch (data_len & 3) {
 		case 3:
 			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
-			wl->tx_tail[0] = data[data_len - 3];
-			wl->tx_tail[1] = data[data_len - 2];
-			wl->tx_tail[2] = data[data_len - 1];
+			tail[0] = data[data_len - 3];
+			tail[1] = data[data_len - 2];
+			tail[2] = data[data_len - 1];
 			break;
 		case 2:
 			ctl |= B43_PIO8_TXCTL_8_15;
-			wl->tx_tail[0] = data[data_len - 2];
-			wl->tx_tail[1] = data[data_len - 1];
-			wl->tx_tail[2] = 0;
+			tail[0] = data[data_len - 2];
+			tail[1] = data[data_len - 1];
 			break;
 		case 1:
-			wl->tx_tail[0] = data[data_len - 1];
-			wl->tx_tail[1] = 0;
-			wl->tx_tail[2] = 0;
+			tail[0] = data[data_len - 1];
 			break;
 		}
 		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
-		ssb_block_write(dev->dev, wl->tx_tail, 4,
+		ssb_block_write(dev->dev, tail, 4,
 				q->mmio_base + B43_PIO8_TXDATA,
 				sizeof(u32));
 	}
@@ -456,6 +459,7 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
 	int err;
 	unsigned int hdrlen;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
 
 	B43_WARN_ON(list_empty(&q->packets_list));
 	pack = list_entry(q->packets_list.next,
@@ -463,7 +467,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
 
 	cookie = generate_cookie(q, pack);
 	hdrlen = b43_txhdr_size(dev);
-	err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
+	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
+	B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
+	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
 				 info, cookie);
 	if (err)
 		return err;
@@ -477,9 +483,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
 
 	pack->skb = skb;
 	if (q->rev >= 8)
-		pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
+		pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
 	else
-		pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
+		pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
 
 	/* Remove it from the list of available packet slots.
 	 * It will be put back when we receive the status report. */
@@ -625,8 +631,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
 	unsigned int i, padding;
 	struct sk_buff *skb;
 	const char *err_msg = NULL;
+	struct b43_rxhdr_fw4 *rxhdr =
+		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
 
-	memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));
+	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
+	memset(rxhdr, 0, sizeof(*rxhdr));
 
 	/* Check if we have data and wait for it to get ready. */
 	if (q->rev >= 8) {
@@ -664,16 +673,16 @@ data_ready:
 
 	/* Get the preamble (RX header) */
 	if (q->rev >= 8) {
-		ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
+		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
 			       q->mmio_base + B43_PIO8_RXDATA,
 			       sizeof(u32));
 	} else {
-		ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
+		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
 			       q->mmio_base + B43_PIO_RXDATA,
 			       sizeof(u16));
 	}
 	/* Sanity checks. */
-	len = le16_to_cpu(wl->rxhdr.frame_len);
+	len = le16_to_cpu(rxhdr->frame_len);
 	if (unlikely(len > 0x700)) {
 		err_msg = "len > 0x700";
 		goto rx_error;
@@ -683,7 +692,7 @@ data_ready:
 		goto rx_error;
 	}
 
-	macstat = le32_to_cpu(wl->rxhdr.mac_status);
+	macstat = le32_to_cpu(rxhdr->mac_status);
 	if (macstat & B43_RX_MAC_FCSERR) {
 		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
 			/* Drop frames with failed FCS. */
@@ -708,22 +717,25 @@ data_ready:
 			       q->mmio_base + B43_PIO8_RXDATA,
 			       sizeof(u32));
 		if (len & 3) {
+			u8 *tail = wl->pio_tailspace;
+			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
+
 			/* Read the last few bytes. */
-			ssb_block_read(dev->dev, wl->rx_tail, 4,
+			ssb_block_read(dev->dev, tail, 4,
 				       q->mmio_base + B43_PIO8_RXDATA,
 				       sizeof(u32));
 			switch (len & 3) {
 			case 3:
-				skb->data[len + padding - 3] = wl->rx_tail[0];
-				skb->data[len + padding - 2] = wl->rx_tail[1];
-				skb->data[len + padding - 1] = wl->rx_tail[2];
+				skb->data[len + padding - 3] = tail[0];
+				skb->data[len + padding - 2] = tail[1];
+				skb->data[len + padding - 1] = tail[2];
 				break;
 			case 2:
-				skb->data[len + padding - 2] = wl->rx_tail[0];
-				skb->data[len + padding - 1] = wl->rx_tail[1];
+				skb->data[len + padding - 2] = tail[0];
+				skb->data[len + padding - 1] = tail[1];
 				break;
 			case 1:
-				skb->data[len + padding - 1] = wl->rx_tail[0];
+				skb->data[len + padding - 1] = tail[0];
 				break;
 			}
 		}
@@ -732,22 +744,29 @@ data_ready:
 			       q->mmio_base + B43_PIO_RXDATA,
 			       sizeof(u16));
 		if (len & 1) {
+			u8 *tail = wl->pio_tailspace;
+			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
+
 			/* Read the last byte. */
-			ssb_block_read(dev->dev, wl->rx_tail, 2,
+			ssb_block_read(dev->dev, tail, 2,
 				       q->mmio_base + B43_PIO_RXDATA,
 				       sizeof(u16));
-			skb->data[len + padding - 1] = wl->rx_tail[0];
+			skb->data[len + padding - 1] = tail[0];
 		}
 	}
 
-	b43_rx(q->dev, skb, &wl->rxhdr);
+	b43_rx(q->dev, skb, rxhdr);
 
 	return 1;
 
 rx_error:
 	if (err_msg)
 		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
-	b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
+	if (q->rev >= 8)
+		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
+	else
+		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
+
 	return 1;
 }
 
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index ffdce6f3c909..78016ae21c50 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -33,8 +33,14 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
 		      & B43_MMIO_RADIO_HWENABLED_HI_MASK))
 			return 1;
 	} else {
-		if (b43_status(dev) >= B43_STAT_STARTED &&
-		    b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
+		/* To prevent CPU fault on PPC, do not read a register
+		 * unless the interface is started; however, on resume
+		 * for hibernation, this routine is entered early. When
+		 * that happens, unconditionally return TRUE.
+		 */
+		if (b43_status(dev) < B43_STAT_STARTED)
+			return 1;
+		if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
 		    & B43_MMIO_RADIO_HWENABLED_LO_MASK)
 			return 1;
 	}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index f4e9695ec186..eda06529ef5f 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
 
 */
 
-#include "b43.h"
+#include "xmit.h"
 #include "phy_common.h"
 #include "dma.h"
 #include "pio.h"
@@ -621,7 +621,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
 						  (phystat0 & B43_RX_PHYST0_OFDM),
 						  (phystat0 & B43_RX_PHYST0_GAINCTL),
 						  (phystat3 & B43_RX_PHYST3_TRSTATE));
-		status.qual = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
 	}
 
 	if (phystat0 & B43_RX_PHYST0_OFDM)
@@ -690,10 +689,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
 	}
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
-
-	local_bh_disable();
-	ieee80211_rx(dev->wl->hw, skb);
-	local_bh_enable();
+	ieee80211_rx_ni(dev->wl->hw, skb);
 
 #if B43_DEBUG
 	dev->rx_count++;
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 3530de871873..d23ff9fe0c9e 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -2,6 +2,8 @@
 #define B43_XMIT_H_
 
 #include "main.h"
+#include <net/mac80211.h>
+
 
 #define _b43_declare_plcp_hdr(size) \
 	struct b43_plcp_hdr##size {		\
@@ -332,4 +334,21 @@ static inline u8 b43_kidx_to_raw(struct b43_wldev *dev, u8 firmware_kidx)
 	return raw_kidx;
 }
 
+/* struct b43_private_tx_info - TX info private to b43.
+ * The structure is placed in (struct ieee80211_tx_info *)->rate_driver_data
+ *
+ * @bouncebuffer: DMA Bouncebuffer (if used)
+ */
+struct b43_private_tx_info {
+	void *bouncebuffer;
+};
+
+static inline struct b43_private_tx_info *
+b43_get_priv_tx_info(struct ieee80211_tx_info *info)
+{
+	BUILD_BUG_ON(sizeof(struct b43_private_tx_info) >
+		     sizeof(info->rate_driver_data));
+	return (struct b43_private_tx_info *)info->rate_driver_data;
+}
+
 #endif /* B43_XMIT_H_ */
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig
index 94a463478053..1ffa28835c58 100644
--- a/drivers/net/wireless/b43legacy/Kconfig
+++ b/drivers/net/wireless/b43legacy/Kconfig
@@ -1,6 +1,6 @@
 config B43LEGACY
 	tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)"
-	depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA
+	depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
 	select SSB
 	select FW_LOADER
 	---help---
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 038baa8869e2..89fe2f972c72 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -29,8 +29,6 @@
 
 #define B43legacy_IRQWAIT_MAX_RETRIES	20
 
-#define B43legacy_RX_MAX_SSI		60 /* best guess at max ssi */
-
 /* MMIO offsets */
 #define B43legacy_MMIO_DMA0_REASON	0x20
 #define B43legacy_MMIO_DMA0_IRQ_MASK	0x24
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 866403415811..0a86bdf53154 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1240,8 +1240,9 @@ struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
 }
 
 static int dma_tx_fragment(struct b43legacy_dmaring *ring,
-			    struct sk_buff *skb)
+			    struct sk_buff **in_skb)
 {
+	struct sk_buff *skb = *in_skb;
 	const struct b43legacy_dma_ops *ops = ring->ops;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	u8 *header;
@@ -1305,8 +1306,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 		}
 
 		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
+		bounce_skb->dev = skb->dev;
+		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
+		info = IEEE80211_SKB_CB(bounce_skb);
+
 		dev_kfree_skb_any(skb);
 		skb = bounce_skb;
+		*in_skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1360,8 +1367,10 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
 		     struct sk_buff *skb)
 {
 	struct b43legacy_dmaring *ring;
+	struct ieee80211_hdr *hdr;
 	int err = 0;
 	unsigned long flags;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
 	spin_lock_irqsave(&ring->lock, flags);
@@ -1386,7 +1395,11 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
 		goto out_unlock;
 	}
 
-	err = dma_tx_fragment(ring, skb);
+	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
+	 * into the skb data or cb now. */
+	hdr = NULL;
+	info = NULL;
+	err = dma_tx_fragment(ring, &skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4b60148a5e61..ab6a18c2e9d9 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2677,7 +2677,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
 	if (conf->channel->hw_value != phy->channel)
 		b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0);
 
-	dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
+	dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
 
 	/* Adjust the desired TX power level. */
 	if (conf->power_level != 0) {
@@ -3593,7 +3593,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
 {
 	struct b43legacy_wl *wl = dev->wl;
 	struct ssb_bus *bus = dev->dev->bus;
-	struct pci_dev *pdev = bus->host_pci;
+	struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
 	int err;
 	int have_bphy = 0;
 	int have_gphy = 0;
@@ -3707,7 +3707,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
 
 	if (!list_empty(&wl->devlist)) {
 		/* We are not the first core on this chip. */
-		pdev = dev->bus->host_pci;
+		pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
 		/* Only special chips support more than one wireless
 		 * core, although some of the other chips have more than
 		 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index 8783022db11e..d579df72b783 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -34,6 +34,13 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
 		      & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
 			return 1;
 	} else {
+		/* To prevent CPU fault on PPC, do not read a register
+		 * unless the interface is started; however, on resume
+		 * for hibernation, this routine is entered early. When
+		 * that happens, unconditionally return TRUE.
+		 */
+		if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
+			return 1;
 		if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO)
 		    & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK)
 			return 1;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 103f3c9e7f58..9c8882d9275e 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -549,7 +549,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
 				      (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
 				      (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
 	status.noise = dev->stats.link_noise;
-	status.qual = (jssi * 100) / B43legacy_RX_MAX_SSI;
 	/* change to support A PHY */
 	if (phystat0 & B43legacy_RX_PHYST0_OFDM)
 		status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index c15db2293515..287d82728bc3 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -1,7 +1,8 @@
 config HOSTAP
 	tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
-	depends on WLAN_80211
 	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
 	select CRYPTO
 	select CRYPTO_ARC4
 	select CRYPTO_ECB
diff --git a/drivers/net/wireless/i82593.h b/drivers/net/wireless/i82593.h
deleted file mode 100644
index afac5c7a323d..000000000000
--- a/drivers/net/wireless/i82593.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Definitions for Intel 82593 CSMA/CD Core LAN Controller
- * The definitions are taken from the 1992 users manual with Intel
- * order number 297125-001.
- *
- * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
- *
- * Copyright 1994, Anders Klemets <klemets@it.kth.se>
- *
- * HISTORY
- * i82593.h,v
- * Revision 1.4  2005/11/4  09:15:00  baroniunas
- * Modified copyright with permission of author as follows:
- *
- *   "If I82539.H is the only file with my copyright statement
- *    that is included in the Source Forge project, then you have
- *    my approval to change the copyright statement to be a GPL
- *    license, in the way you proposed on October 10."
- *
- * Revision 1.1  1996/07/17 15:23:12  root
- * Initial revision
- *
- * Revision 1.3  1995/04/05  15:13:58  adj
- * Initial alpha release
- *
- * Revision 1.2  1994/06/16  23:57:31  klemets
- * Mirrored all the fields in the configuration block.
- *
- * Revision 1.1  1994/06/02  20:25:34  klemets
- * Initial revision
- *
- *
- */
-#ifndef	_I82593_H
-#define	_I82593_H
-
-/* Intel 82593 CSMA/CD Core LAN Controller */
-
-/* Port 0 Command Register definitions */
-
-/* Execution operations */
-#define OP0_NOP			0	/* CHNL = 0 */
-#define OP0_SWIT_TO_PORT_1	0	/* CHNL = 1 */
-#define OP0_IA_SETUP		1
-#define OP0_CONFIGURE		2
-#define OP0_MC_SETUP		3
-#define OP0_TRANSMIT		4
-#define OP0_TDR			5
-#define OP0_DUMP		6
-#define OP0_DIAGNOSE		7
-#define OP0_TRANSMIT_NO_CRC	9
-#define OP0_RETRANSMIT		12
-#define OP0_ABORT		13
-/* Reception operations */
-#define OP0_RCV_ENABLE		8
-#define OP0_RCV_DISABLE		10
-#define OP0_STOP_RCV		11
-/* Status pointer control operations */
-#define OP0_FIX_PTR		15	/* CHNL = 1 */
-#define OP0_RLS_PTR		15	/* CHNL = 0 */
-#define OP0_RESET		14
-
-#define CR0_CHNL		(1 << 4)	/* 0=Channel 0, 1=Channel 1 */
-#define CR0_STATUS_0		0x00
-#define CR0_STATUS_1		0x20
-#define CR0_STATUS_2		0x40
-#define CR0_STATUS_3		0x60
-#define CR0_INT_ACK		(1 << 7)	/* 0=No ack, 1=acknowledge */
-
-/* Port 0 Status Register definitions */
-
-#define SR0_NO_RESULT		0		/* dummy */
-#define SR0_EVENT_MASK		0x0f
-#define SR0_IA_SETUP_DONE	1
-#define SR0_CONFIGURE_DONE	2
-#define SR0_MC_SETUP_DONE	3
-#define SR0_TRANSMIT_DONE	4
-#define SR0_TDR_DONE		5
-#define SR0_DUMP_DONE		6
-#define SR0_DIAGNOSE_PASSED	7
-#define SR0_TRANSMIT_NO_CRC_DONE 9
-#define SR0_RETRANSMIT_DONE	12
-#define SR0_EXECUTION_ABORTED	13
-#define SR0_END_OF_FRAME	8
-#define SR0_RECEPTION_ABORTED	10
-#define SR0_DIAGNOSE_FAILED	15
-#define SR0_STOP_REG_HIT	11
-
-#define SR0_CHNL		(1 << 4)
-#define SR0_EXECUTION		(1 << 5)
-#define SR0_RECEPTION		(1 << 6)
-#define SR0_INTERRUPT		(1 << 7)
-#define SR0_BOTH_RX_TX		(SR0_EXECUTION | SR0_RECEPTION)
-
-#define SR3_EXEC_STATE_MASK	0x03
-#define SR3_EXEC_IDLE		0
-#define SR3_TX_ABORT_IN_PROGRESS 1
-#define SR3_EXEC_ACTIVE		2
-#define SR3_ABORT_IN_PROGRESS	3
-#define SR3_EXEC_CHNL		(1 << 2)
-#define SR3_STP_ON_NO_RSRC	(1 << 3)
-#define SR3_RCVING_NO_RSRC	(1 << 4)
-#define SR3_RCV_STATE_MASK	0x60
-#define SR3_RCV_IDLE		0x00
-#define SR3_RCV_READY		0x20
-#define SR3_RCV_ACTIVE		0x40
-#define SR3_RCV_STOP_IN_PROG	0x60
-#define SR3_RCV_CHNL		(1 << 7)
-
-/* Port 1 Command Register definitions */
-
-#define OP1_NOP			0
-#define OP1_SWIT_TO_PORT_0	1
-#define OP1_INT_DISABLE		2
-#define OP1_INT_ENABLE		3
-#define OP1_SET_TS		5
-#define OP1_RST_TS		7
-#define OP1_POWER_DOWN		8
-#define OP1_RESET_RING_MNGMT	11
-#define OP1_RESET		14
-#define OP1_SEL_RST		15
-
-#define CR1_STATUS_4		0x00
-#define CR1_STATUS_5		0x20
-#define CR1_STATUS_6		0x40
-#define CR1_STOP_REG_UPDATE	(1 << 7)
-
-/* Receive frame status bits */
-
-#define	RX_RCLD			(1 << 0)
-#define RX_IA_MATCH		(1 << 1)
-#define	RX_NO_AD_MATCH		(1 << 2)
-#define RX_NO_SFD		(1 << 3)
-#define RX_SRT_FRM		(1 << 7)
-#define RX_OVRRUN		(1 << 8)
-#define RX_ALG_ERR		(1 << 10)
-#define RX_CRC_ERR		(1 << 11)
-#define RX_LEN_ERR		(1 << 12)
-#define RX_RCV_OK		(1 << 13)
-#define RX_TYP_LEN		(1 << 15)
-
-/* Transmit status bits */
-
-#define TX_NCOL_MASK		0x0f
-#define TX_FRTL			(1 << 4)
-#define TX_MAX_COL		(1 << 5)
-#define TX_HRT_BEAT		(1 << 6)
-#define TX_DEFER		(1 << 7)
-#define TX_UND_RUN		(1 << 8)
-#define TX_LOST_CTS		(1 << 9)
-#define TX_LOST_CRS		(1 << 10)
-#define TX_LTCOL		(1 << 11)
-#define TX_OK			(1 << 13)
-#define TX_COLL			(1 << 15)
-
-struct i82593_conf_block {
-  u_char fifo_limit : 4,
-  	 forgnesi   : 1,
-  	 fifo_32    : 1,
-  	 d6mod      : 1,
-  	 throttle_enb : 1;
-  u_char throttle   : 6,
-	 cntrxint   : 1,
-	 contin	    : 1;
-  u_char addr_len   : 3,
-  	 acloc 	    : 1,
- 	 preamb_len : 2,
-  	 loopback   : 2;
-  u_char lin_prio   : 3,
-	 tbofstop   : 1,
-	 exp_prio   : 3,
-	 bof_met    : 1;
-  u_char	    : 4,
-	 ifrm_spc   : 4;
-  u_char	    : 5,
-	 slottim_low : 3;
-  u_char slottim_hi : 3,
-		    : 1,
-	 max_retr   : 4;
-  u_char prmisc     : 1,
-	 bc_dis     : 1,
-  		    : 1,
-	 crs_1	    : 1,
-	 nocrc_ins  : 1,
-	 crc_1632   : 1,
-  	 	    : 1,
-  	 crs_cdt    : 1;
-  u_char cs_filter  : 3,
-	 crs_src    : 1,
-	 cd_filter  : 3,
-		    : 1;
-  u_char	    : 2,
-  	 min_fr_len : 6;
-  u_char lng_typ    : 1,
-	 lng_fld    : 1,
-	 rxcrc_xf   : 1,
-	 artx	    : 1,
-	 sarec	    : 1,
-	 tx_jabber  : 1,	/* why is this called max_len in the manual? */
-	 hash_1	    : 1,
-  	 lbpkpol    : 1;
-  u_char	    : 6,
-  	 fdx	    : 1,
-  	  	    : 1;
-  u_char dummy_6    : 6,	/* supposed to be ones */
-  	 mult_ia    : 1,
-  	 dis_bof    : 1;
-  u_char dummy_1    : 1,	/* supposed to be one */
-	 tx_ifs_retrig : 2,
-	 mc_all     : 1,
-	 rcv_mon    : 2,
-	 frag_acpt  : 1,
-  	 tstrttrs   : 1;
-  u_char fretx	    : 1,
-	 runt_eop   : 1,
-	 hw_sw_pin  : 1,
-	 big_endn   : 1,
-	 syncrqs    : 1,
-	 sttlen     : 1,
-	 tx_eop     : 1,
-  	 rx_eop	    : 1;
-  u_char rbuf_size  : 5,
-	 rcvstop    : 1,
-  	 	    : 2;
-};
-
-#define I82593_MAX_MULTICAST_ADDRESSES	128	/* Hardware hashed filter */
-
-#endif /* _I82593_H */
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index a8131384c6b9..2715b101aded 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -4,8 +4,10 @@
 
 config IPW2100
 	tristate "Intel PRO/Wireless 2100 Network Connection"
-	depends on PCI && WLAN_80211 && CFG80211
+	depends on PCI && CFG80211
 	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
 	select FW_LOADER
 	select LIB80211
 	select LIBIPW
@@ -63,8 +65,10 @@ config IPW2100_DEBUG
 
 config IPW2200
 	tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
-	depends on PCI && WLAN_80211 && CFG80211
+	depends on PCI && CFG80211 && CFG80211_WEXT
 	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
 	select FW_LOADER
 	select LIB80211
 	select LIBIPW
@@ -150,8 +154,9 @@ config IPW2200_DEBUG
 
 config LIBIPW
 	tristate
-	depends on PCI && WLAN_80211 && CFG80211
+	depends on PCI && CFG80211
 	select WIRELESS_EXT
+	select WEXT_SPY
 	select CRYPTO
 	select CRYPTO_ARC4
 	select CRYPTO_ECB
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 6e2fc0cb6f8a..17a9cb3528fc 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -296,6 +296,33 @@ static const char *command_types[] = {
 };
 #endif
 
+#define WEXT_USECHANNELS 1
+
+static const long ipw2100_frequencies[] = {
+	2412, 2417, 2422, 2427,
+	2432, 2437, 2442, 2447,
+	2452, 2457, 2462, 2467,
+	2472, 2484
+};
+
+#define FREQ_COUNT	ARRAY_SIZE(ipw2100_frequencies)
+
+static const long ipw2100_rates_11b[] = {
+	1000000,
+	2000000,
+	5500000,
+	11000000
+};
+
+static struct ieee80211_rate ipw2100_bg_rates[] = {
+	{ .bitrate = 10 },
+	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+};
+
+#define RATE_COUNT ARRAY_SIZE(ipw2100_rates_11b)
+
 /* Pre-decl until we get the code solid and then we can clean it up */
 static void ipw2100_tx_send_commands(struct ipw2100_priv *priv);
 static void ipw2100_tx_send_data(struct ipw2100_priv *priv);
@@ -1141,6 +1168,7 @@ static int rf_kill_active(struct ipw2100_priv *priv)
 	int i;
 
 	if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
 		priv->status &= ~STATUS_RF_KILL_HW;
 		return 0;
 	}
@@ -1151,10 +1179,13 @@ static int rf_kill_active(struct ipw2100_priv *priv)
 		value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
 	}
 
-	if (value == 0)
+	if (value == 0) {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
 		priv->status |= STATUS_RF_KILL_HW;
-	else
+	} else {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
 		priv->status &= ~STATUS_RF_KILL_HW;
+	}
 
 	return (value == 0);
 }
@@ -1814,13 +1845,6 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
 	return rc;
 }
 
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
-	struct ipw2100_priv *priv = libipw_priv(dev);
-	return ipw2100_up(priv, 1);
-}
-
 static void ipw2100_down(struct ipw2100_priv *priv)
 {
 	unsigned long flags;
@@ -1875,6 +1899,64 @@ static void ipw2100_down(struct ipw2100_priv *priv)
 	netif_stop_queue(priv->net_dev);
 }
 
+/* Called by register_netdev() */
+static int ipw2100_net_init(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	struct wireless_dev *wdev = &priv->ieee->wdev;
+	int ret;
+	int i;
+
+	ret = ipw2100_up(priv, 1);
+	if (ret)
+		return ret;
+
+	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
+
+	/* fill-out priv->ieee->bg_band */
+	if (geo->bg_channels) {
+		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
+
+		bg_band->band = IEEE80211_BAND_2GHZ;
+		bg_band->n_channels = geo->bg_channels;
+		bg_band->channels =
+			kzalloc(geo->bg_channels *
+				sizeof(struct ieee80211_channel), GFP_KERNEL);
+		/* translate geo->bg to bg_band.channels */
+		for (i = 0; i < geo->bg_channels; i++) {
+			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+			bg_band->channels[i].center_freq = geo->bg[i].freq;
+			bg_band->channels[i].hw_value = geo->bg[i].channel;
+			bg_band->channels[i].max_power = geo->bg[i].max_power;
+			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_PASSIVE_SCAN;
+			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IBSS;
+			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_RADAR;
+			/* No equivalent for LIBIPW_CH_80211H_RULES,
+			   LIBIPW_CH_UNIFORM_SPREADING, or
+			   LIBIPW_CH_B_ONLY... */
+		}
+		/* point at bitrate info */
+		bg_band->bitrates = ipw2100_bg_rates;
+		bg_band->n_bitrates = RATE_COUNT;
+
+		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+	}
+
+	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+	if (wiphy_register(wdev->wiphy)) {
+		ipw2100_down(priv);
+		return -EIO;
+	}
+	return 0;
+}
+
 static void ipw2100_reset_adapter(struct work_struct *work)
 {
 	struct ipw2100_priv *priv =
@@ -2090,6 +2172,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
 		       priv->net_dev->name);
 
 	/* RF_KILL is now enabled (else we wouldn't be here) */
+	wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
 	priv->status |= STATUS_RF_KILL_HW;
 
 	/* Make sure the RF Kill check timer is running */
@@ -6029,7 +6112,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
 	struct ipw2100_priv *priv;
 	struct net_device *dev;
 
-	dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
+	dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
 	if (!dev)
 		return NULL;
 	priv = libipw_priv(dev);
@@ -6342,7 +6425,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 		sysfs_remove_group(&pci_dev->dev.kobj,
 				   &ipw2100_attribute_group);
 
-		free_ieee80211(dev);
+		free_ieee80211(dev, 0);
 		pci_set_drvdata(pci_dev, NULL);
 	}
 
@@ -6400,7 +6483,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
 		if (dev->base_addr)
 			iounmap((void __iomem *)dev->base_addr);
 
-		free_ieee80211(dev);
+		/* wiphy_unregister needs to be here, before free_ieee80211 */
+		wiphy_unregister(priv->ieee->wdev.wiphy);
+		kfree(priv->ieee->bg_band.channels);
+		free_ieee80211(dev, 0);
 	}
 
 	pci_release_regions(pci_dev);
@@ -6487,6 +6573,16 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
 }
 #endif
 
+static void ipw2100_shutdown(struct pci_dev *pci_dev)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+
+	/* Take down the device; powers it off, etc. */
+	ipw2100_down(priv);
+
+	pci_disable_device(pci_dev);
+}
+
 #define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
 
 static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
@@ -6550,6 +6646,7 @@ static struct pci_driver ipw2100_pci_driver = {
 	.suspend = ipw2100_suspend,
 	.resume = ipw2100_resume,
 #endif
+	.shutdown = ipw2100_shutdown,
 };
 
 /**
@@ -6601,26 +6698,6 @@ static void __exit ipw2100_exit(void)
 module_init(ipw2100_init);
 module_exit(ipw2100_exit);
 
-#define WEXT_USECHANNELS 1
-
-static const long ipw2100_frequencies[] = {
-	2412, 2417, 2422, 2427,
-	2432, 2437, 2442, 2447,
-	2452, 2457, 2462, 2467,
-	2472, 2484
-};
-
-#define FREQ_COUNT	ARRAY_SIZE(ipw2100_frequencies)
-
-static const long ipw2100_rates_11b[] = {
-	1000000,
-	2000000,
-	5500000,
-	11000000
-};
-
-#define RATE_COUNT ARRAY_SIZE(ipw2100_rates_11b)
-
 static int ipw2100_wx_get_name(struct net_device *dev,
 			       struct iw_request_info *info,
 			       union iwreq_data *wrqu, char *extra)
@@ -8462,6 +8539,12 @@ static int ipw2100_get_firmware(struct ipw2100_priv *priv,
 	return 0;
 }
 
+MODULE_FIRMWARE(IPW2100_FW_NAME("-i"));
+#ifdef CONFIG_IPW2100_MONITOR
+MODULE_FIRMWARE(IPW2100_FW_NAME("-p"));
+#endif
+MODULE_FIRMWARE(IPW2100_FW_NAME(""));
+
 static void ipw2100_release_firmware(struct ipw2100_priv *priv,
 				     struct ipw2100_fw *fw)
 {
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index a6ca536e44f8..c28984ae46ff 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -81,6 +81,11 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("ipw2200-ibss.fw");
+#ifdef CONFIG_IPW2200_MONITOR
+MODULE_FIRMWARE("ipw2200-sniffer.fw");
+#endif
+MODULE_FIRMWARE("ipw2200-bss.fw");
 
 static int cmdlog = 0;
 static int debug = 0;
@@ -104,6 +109,25 @@ static int antenna = CFG_SYS_ANTENNA_BOTH;
 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
 #endif
 
+static struct ieee80211_rate ipw2200_rates[] = {
+	{ .bitrate = 10 },
+	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 60 },
+	{ .bitrate = 90 },
+	{ .bitrate = 120 },
+	{ .bitrate = 180 },
+	{ .bitrate = 240 },
+	{ .bitrate = 360 },
+	{ .bitrate = 480 },
+	{ .bitrate = 540 }
+};
+
+#define ipw2200_a_rates		(ipw2200_rates + 4)
+#define ipw2200_num_a_rates	8
+#define ipw2200_bg_rates	(ipw2200_rates + 0)
+#define ipw2200_num_bg_rates	12
 
 #ifdef CONFIG_IPW2200_QOS
 static int qos_enable = 0;
@@ -1734,10 +1758,13 @@ static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
 
 static int rf_kill_active(struct ipw_priv *priv)
 {
-	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
+	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
 		priv->status |= STATUS_RF_KILL_HW;
-	else
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
+	} else {
 		priv->status &= ~STATUS_RF_KILL_HW;
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
+	}
 
 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
 }
@@ -2020,6 +2047,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
 		priv->status |= STATUS_RF_KILL_HW;
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
 		wake_up_interruptible(&priv->wait_command_queue);
 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
 		cancel_delayed_work(&priv->request_scan);
@@ -8655,24 +8683,6 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
  *
  */
 
-static int ipw_wx_get_name(struct net_device *dev,
-			   struct iw_request_info *info,
-			   union iwreq_data *wrqu, char *extra)
-{
-	struct ipw_priv *priv = libipw_priv(dev);
-	mutex_lock(&priv->mutex);
-	if (priv->status & STATUS_RF_KILL_MASK)
-		strcpy(wrqu->name, "radio off");
-	else if (!(priv->status & STATUS_ASSOCIATED))
-		strcpy(wrqu->name, "unassociated");
-	else
-		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
-			 ipw_modes[priv->assoc_request.ieee_mode]);
-	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
-	mutex_unlock(&priv->mutex);
-	return 0;
-}
-
 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
 {
 	if (channel == 0) {
@@ -9972,7 +9982,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
 /* Rebase the WE IOCTLs to zero for the handler array */
 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
 static iw_handler ipw_wx_handlers[] = {
-	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
+	IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
 	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
 	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
 	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
@@ -11275,6 +11285,7 @@ static int ipw_up(struct ipw_priv *priv)
 		if (!(priv->config & CFG_CUSTOM_MAC))
 			eeprom_parse_mac(priv, priv->mac_addr);
 		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+		memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
 
 		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
 			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
@@ -11416,16 +11427,100 @@ static void ipw_bg_down(struct work_struct *work)
 /* Called by register_netdev() */
 static int ipw_net_init(struct net_device *dev)
 {
+	int i, rc = 0;
 	struct ipw_priv *priv = libipw_priv(dev);
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	struct wireless_dev *wdev = &priv->ieee->wdev;
 	mutex_lock(&priv->mutex);
 
 	if (ipw_up(priv)) {
-		mutex_unlock(&priv->mutex);
-		return -EIO;
+		rc = -EIO;
+		goto out;
+	}
+
+	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
+
+	/* fill-out priv->ieee->bg_band */
+	if (geo->bg_channels) {
+		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
+
+		bg_band->band = IEEE80211_BAND_2GHZ;
+		bg_band->n_channels = geo->bg_channels;
+		bg_band->channels =
+			kzalloc(geo->bg_channels *
+				sizeof(struct ieee80211_channel), GFP_KERNEL);
+		/* translate geo->bg to bg_band.channels */
+		for (i = 0; i < geo->bg_channels; i++) {
+			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+			bg_band->channels[i].center_freq = geo->bg[i].freq;
+			bg_band->channels[i].hw_value = geo->bg[i].channel;
+			bg_band->channels[i].max_power = geo->bg[i].max_power;
+			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_PASSIVE_SCAN;
+			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IBSS;
+			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_RADAR;
+			/* No equivalent for LIBIPW_CH_80211H_RULES,
+			   LIBIPW_CH_UNIFORM_SPREADING, or
+			   LIBIPW_CH_B_ONLY... */
+		}
+		/* point at bitrate info */
+		bg_band->bitrates = ipw2200_bg_rates;
+		bg_band->n_bitrates = ipw2200_num_bg_rates;
+
+		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+	}
+
+	/* fill-out priv->ieee->a_band */
+	if (geo->a_channels) {
+		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
+
+		a_band->band = IEEE80211_BAND_5GHZ;
+		a_band->n_channels = geo->a_channels;
+		a_band->channels =
+			kzalloc(geo->a_channels *
+				sizeof(struct ieee80211_channel), GFP_KERNEL);
+		/* translate geo->a to a_band.channels */
+		for (i = 0; i < geo->a_channels; i++) {
+			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
+			a_band->channels[i].center_freq = geo->a[i].freq;
+			a_band->channels[i].hw_value = geo->a[i].channel;
+			a_band->channels[i].max_power = geo->a[i].max_power;
+			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
+				a_band->channels[i].flags |=
+					IEEE80211_CHAN_PASSIVE_SCAN;
+			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
+				a_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IBSS;
+			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
+				a_band->channels[i].flags |=
+					IEEE80211_CHAN_RADAR;
+			/* No equivalent for LIBIPW_CH_80211H_RULES,
+			   LIBIPW_CH_UNIFORM_SPREADING, or
+			   LIBIPW_CH_B_ONLY... */
+		}
+		/* point at bitrate info */
+		a_band->bitrates = ipw2200_a_rates;
+		a_band->n_bitrates = ipw2200_num_a_rates;
+
+		wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+	}
+
+	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+
+	/* With that information in place, we can now register the wiphy... */
+	if (wiphy_register(wdev->wiphy)) {
+		rc = -EIO;
+		goto out;
 	}
 
+out:
 	mutex_unlock(&priv->mutex);
-	return 0;
+	return rc;
 }
 
 /* PCI driver stuff */
@@ -11556,7 +11651,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
 	if (priv->prom_net_dev)
 		return -EPERM;
 
-	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
+	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
 	if (priv->prom_net_dev == NULL)
 		return -ENOMEM;
 
@@ -11575,7 +11670,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
 
 	rc = register_netdev(priv->prom_net_dev);
 	if (rc) {
-		free_ieee80211(priv->prom_net_dev);
+		free_ieee80211(priv->prom_net_dev, 1);
 		priv->prom_net_dev = NULL;
 		return rc;
 	}
@@ -11589,7 +11684,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
 		return;
 
 	unregister_netdev(priv->prom_net_dev);
-	free_ieee80211(priv->prom_net_dev);
+	free_ieee80211(priv->prom_net_dev, 1);
 
 	priv->prom_net_dev = NULL;
 }
@@ -11617,7 +11712,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
 	struct ipw_priv *priv;
 	int i;
 
-	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
+	net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
 	if (net_dev == NULL) {
 		err = -ENOMEM;
 		goto out;
@@ -11765,7 +11860,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
       out_free_ieee80211:
-	free_ieee80211(priv->net_dev);
+	free_ieee80211(priv->net_dev, 0);
       out:
 	return err;
 }
@@ -11832,7 +11927,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
-	free_ieee80211(priv->net_dev);
+	/* wiphy_unregister needs to be here, before free_ieee80211 */
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->a_band.channels);
+	kfree(priv->ieee->bg_band.channels);
+	free_ieee80211(priv->net_dev, 0);
 	free_firmware();
 }
 
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 1e334ff6bd52..bf45391172f3 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -31,6 +31,7 @@
 #include <linux/ieee80211.h>
 
 #include <net/lib80211.h>
+#include <net/cfg80211.h>
 
 #define LIBIPW_VERSION "git-1.1.13"
 
@@ -783,12 +784,15 @@ struct libipw_geo {
 
 struct libipw_device {
 	struct net_device *dev;
+	struct wireless_dev wdev;
 	struct libipw_security sec;
 
 	/* Bookkeeping structures */
 	struct libipw_stats ieee_stats;
 
 	struct libipw_geo geo;
+	struct ieee80211_supported_band bg_band;
+	struct ieee80211_supported_band a_band;
 
 	/* Probe / Beacon management */
 	struct list_head network_free_list;
@@ -1014,8 +1018,8 @@ static inline int libipw_is_cck_rate(u8 rate)
 }
 
 /* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev);
-extern struct net_device *alloc_ieee80211(int sizeof_priv);
+extern void free_ieee80211(struct net_device *dev, int monitor);
+extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
 extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
 extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index eb2b60834c17..bf21eb383dbd 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -62,6 +62,9 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");
 
+struct cfg80211_ops libipw_config_ops = { };
+void *libipw_wiphy_privid = &libipw_wiphy_privid;
+
 static int libipw_networks_allocate(struct libipw_device *ieee)
 {
 	if (ieee->networks)
@@ -140,7 +143,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(libipw_change_mtu);
 
-struct net_device *alloc_ieee80211(int sizeof_priv)
+struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
 {
 	struct libipw_device *ieee;
 	struct net_device *dev;
@@ -157,10 +160,31 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
 
 	ieee->dev = dev;
 
+	if (!monitor) {
+		ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
+		if (!ieee->wdev.wiphy) {
+			LIBIPW_ERROR("Unable to allocate wiphy.\n");
+			goto failed_free_netdev;
+		}
+
+		ieee->dev->ieee80211_ptr = &ieee->wdev;
+		ieee->wdev.iftype = NL80211_IFTYPE_STATION;
+
+		/* Fill-out wiphy structure bits we know...  Not enough info
+		   here to call set_wiphy_dev or set MAC address or channel info
+		   -- have to do that in ->ndo_init... */
+		ieee->wdev.wiphy->privid = libipw_wiphy_privid;
+
+		ieee->wdev.wiphy->max_scan_ssids = 1;
+		ieee->wdev.wiphy->max_scan_ie_len = 0;
+		ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
+						| BIT(NL80211_IFTYPE_ADHOC);
+	}
+
 	err = libipw_networks_allocate(ieee);
 	if (err) {
 		LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
-		goto failed_free_netdev;
+		goto failed_free_wiphy;
 	}
 	libipw_networks_initialize(ieee);
 
@@ -193,19 +217,27 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
 
 	return dev;
 
+failed_free_wiphy:
+	if (!monitor)
+		wiphy_free(ieee->wdev.wiphy);
 failed_free_netdev:
 	free_netdev(dev);
 failed:
 	return NULL;
 }
 
-void free_ieee80211(struct net_device *dev)
+void free_ieee80211(struct net_device *dev, int monitor)
 {
 	struct libipw_device *ieee = netdev_priv(dev);
 
 	lib80211_crypt_info_free(&ieee->crypt_info);
 
 	libipw_networks_free(ieee);
+
+	/* free cfg80211 resources */
+	if (!monitor)
+		wiphy_free(ieee->wdev.wiphy);
+
 	free_netdev(dev);
 }
 
@@ -216,17 +248,22 @@ u32 libipw_debug_level = 0;
 EXPORT_SYMBOL_GPL(libipw_debug_level);
 static struct proc_dir_entry *libipw_proc = NULL;
 
-static int show_debug_level(char *page, char **start, off_t offset,
-			    int count, int *eof, void *data)
+static int debug_level_proc_show(struct seq_file *m, void *v)
 {
-	return snprintf(page, count, "0x%08X\n", libipw_debug_level);
+	seq_printf(m, "0x%08X\n", libipw_debug_level);
+	return 0;
 }
 
-static int store_debug_level(struct file *file, const char __user * buffer,
-			     unsigned long count, void *data)
+static int debug_level_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debug_level_proc_show, NULL);
+}
+
+static ssize_t debug_level_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
 {
 	char buf[] = "0x00000000\n";
-	unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
+	size_t len = min(sizeof(buf) - 1, count);
 	unsigned long val;
 
 	if (copy_from_user(buf, buffer, len))
@@ -240,6 +277,15 @@ static int store_debug_level(struct file *file, const char __user * buffer,
 
 	return strnlen(buf, len);
 }
+
+static const struct file_operations debug_level_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_level_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= debug_level_proc_write,
+};
 #endif				/* CONFIG_LIBIPW_DEBUG */
 
 static int __init libipw_init(void)
@@ -254,16 +300,13 @@ static int __init libipw_init(void)
 				" proc directory\n");
 		return -EIO;
 	}
-	e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
-			      libipw_proc);
+	e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
+			&debug_level_proc_fops);
 	if (!e) {
 		remove_proc_entry(DRV_NAME, init_net.proc_net);
 		libipw_proc = NULL;
 		return -EIO;
 	}
-	e->read_proc = show_debug_level;
-	e->write_proc = store_debug_level;
-	e->data = NULL;
 #endif				/* CONFIG_LIBIPW_DEBUG */
 
 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
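
The libipw_module.c conversion above replaces the old read_proc/write_proc hooks with a seq_file-backed file_operations table. A hedged, self-contained module using the same proc_create()/single_open() pattern (demo_value and the entry name are made up; read-only for brevity):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static u32 demo_value = 0xdeadbeef;	/* stand-in for libipw_debug_level */

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%08X\n", demo_value);
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	return proc_create("demo_value", 0444, NULL, &demo_proc_fops) ? 0 : -EIO;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_value", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
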
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 99310c033253..b16b06c2031f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,17 +1,7 @@
 config IWLWIFI
 	tristate "Intel Wireless Wifi"
-	depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
-	select LIB80211
+	depends on PCI && MAC80211 && EXPERIMENTAL
 	select FW_LOADER
-	select MAC80211_LEDS if IWLWIFI_LEDS
-	select LEDS_CLASS if IWLWIFI_LEDS
-
-config IWLWIFI_LEDS
-	bool "Enable LED support in iwlagn and iwl3945 drivers"
-	depends on IWLWIFI
-	default y
-	---help---
-	  Select this if you want LED support.
 
 config IWLWIFI_SPECTRUM_MEASUREMENT
 	bool "Enable Spectrum Measurement in iwlagn driver"
@@ -50,6 +40,24 @@ config IWLWIFI_DEBUGFS
         ---help---
 	  Enable creation of debugfs files for the iwlwifi drivers.
 
+config IWLWIFI_DEVICE_TRACING
+	bool "iwlwifi device access tracing"
+	depends on IWLWIFI
+	depends on EVENT_TRACING
+	help
+	  Say Y here to trace all commands, including TX frames and IO
+	  accesses, sent to the device. If you say yes, iwlwifi will
+	  register with the ftrace framework for event tracing and dump
+	  all this information to the ringbuffer, you may need to
+	  all this information to the ringbuffer; you may need to
+	  for more information.
+
+	  When tracing is not enabled, this option still has some
+	  (though rather small) overhead.
+
+	  If unsure, say Y so we can help you better when problems
+	  occur.
+
 config IWLAGN
 	tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
 	depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1d4e0a226fd4..7f82044af242 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,20 +1,22 @@
 obj-$(CONFIG_IWLWIFI)	+= iwlcore.o
 iwlcore-objs 		:= iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
 iwlcore-objs 		+= iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
-iwlcore-objs 		+= iwl-scan.o
+iwlcore-objs 		+= iwl-scan.o iwl-led.o
 iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
 iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
+iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 
+CFLAGS_iwl-devtrace.o := -I$(src)
+
+# AGN
 obj-$(CONFIG_IWLAGN)	+= iwlagn.o
-iwlagn-objs		:= iwl-agn.o iwl-agn-rs.o
+iwlagn-objs		:= iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
 
 iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
 iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
 iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
 iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
 
+# 3945
 obj-$(CONFIG_IWL3945)	+= iwl3945.o
 iwl3945-objs		:= iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 950267ab556a..8414178bcff4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -44,6 +44,7 @@
 #include "iwl-sta.h"
 #include "iwl-helpers.h"
 #include "iwl-5000-hw.h"
+#include "iwl-agn-led.h"
 
 /* Highest firmware API version supported */
 #define IWL1000_UCODE_API_MAX 3
@@ -76,7 +77,10 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
 /* NIC configuration for 1000 series */
 static void iwl1000_nic_config(struct iwl_priv *priv)
 {
-	iwl5000_nic_config(priv);
+	/* set CSR_HW_IF_CONFIG_REG for uCode use */
+	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
 
 	/* Setting digital SVR for 1000 card to 1.32V */
 	/* locking is acquired in iwl_set_bits_mask_prph() function */
@@ -106,9 +110,8 @@ static struct iwl_lib_ops iwl1000_lib = {
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
-		.stop = iwl5000_apm_stop,
+		.init = iwl_apm_init,
+		.stop = iwl_apm_stop,
 		.config = iwl1000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
 	},
@@ -142,6 +145,7 @@ static struct iwl_ops iwl1000_ops = {
 	.lib = &iwl1000_lib,
 	.hcmd = &iwl5000_hcmd,
 	.utils = &iwl5000_hcmd_utils,
+	.led = &iwlagn_led_ops,
 };
 
 struct iwl_cfg iwl1000_bgn_cfg = {
@@ -152,15 +156,50 @@ struct iwl_cfg iwl1000_bgn_cfg = {
 	.sku = IWL_SKU_G|IWL_SKU_N,
 	.ops = &iwl1000_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_A,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
 	.shadow_ram_support = false,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
 	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.support_ct_kill_exit = true,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
+struct iwl_cfg iwl1000_bg_cfg = {
+	.name = "1000 Series BG",
+	.fw_name_pre = IWL1000_FW_PRE,
+	.ucode_api_max = IWL1000_UCODE_API_MAX,
+	.ucode_api_min = IWL1000_UCODE_API_MIN,
+	.sku = IWL_SKU_G,
+	.ops = &iwl1000_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
+	.mod_params = &iwl50_mod_params,
+	.valid_tx_ant = ANT_A,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
+	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
+	.shadow_ram_support = false,
+	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.support_ct_kill_exit = true,
+};
+
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
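
MODULE_FIRMWARE() above only advertises the ucode image name to userspace tooling; at runtime the driver still pulls the image in through the firmware loader (FW_LOADER in the Kconfig hunk earlier). A hedged sketch of that request/release pattern, with a hypothetical image name:

#include <linux/device.h>
#include <linux/firmware.h>

/* Hypothetical: fetch "demo-ucode-1.fw" on behalf of the given device. */
static int demo_load_ucode(struct device *dev)
{
	const struct firmware *fw;
	int ret = request_firmware(&fw, "demo-ucode-1.fw", dev);

	if (ret)
		return ret;	/* image missing or loader unavailable */

	/* fw->data / fw->size would be copied into device memory here */

	release_firmware(fw);
	return 0;
}
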
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 16772780c5b0..6fd10d443ba3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,12 +71,6 @@
 
 #include "iwl-eeprom.h"
 
-/*
- * uCode queue management definitions ...
- * Queue #4 is the command queue for 3945 and 4965.
- */
-#define IWL_CMD_QUEUE_NUM	4
-
 /* Time constants */
 #define SHORT_SLOT_TIME 9
 #define LONG_SLOT_TIME 20
@@ -254,12 +248,6 @@ struct iwl3945_eeprom {
 #define TFD_CTL_PAD_SET(n)         (n << 28)
 #define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /* Sizes and addresses for instruction and data memory (SRAM) in
  * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
 #define IWL39_RTC_INST_LOWER_BOUND		(0x000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 8c29ded7d02c..a871d09d598f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -24,8 +24,6 @@
  *
  *****************************************************************************/
 
-#ifdef CONFIG_IWLWIFI_LEDS
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -43,388 +41,51 @@
 #include "iwl-3945.h"
 #include "iwl-core.h"
 #include "iwl-dev.h"
+#include "iwl-3945-led.h"
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-static const char *led_type_str[] = {
-	__stringify(IWL_LED_TRG_TX),
-	__stringify(IWL_LED_TRG_RX),
-	__stringify(IWL_LED_TRG_ASSOC),
-	__stringify(IWL_LED_TRG_RADIO),
-	NULL
-};
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-static const struct {
-	u16 brightness;
-	u8 on_time;
-	u8 off_time;
-} blink_tbl[] =
-{
-	{300, 25, 25},
-	{200, 40, 40},
-	{100, 55, 55},
-	{70, 65, 65},
-	{50, 75, 75},
-	{20, 85, 85},
-	{15, 95, 95 },
-	{10, 110, 110},
-	{5, 130, 130},
-	{0, 167, 167},
-	/* SOLID_ON */
-	{-1, IWL_LED_SOLID, 0}
-};
-
-#define IWL_1MB_RATE (128 * 1024)
-#define IWL_LED_THRESHOLD (16)
-#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /*Exclude Solid on*/
-#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
-
-static void iwl3945_led_cmd_callback(struct iwl_priv *priv,
-				     struct iwl_device_cmd *cmd,
-				     struct sk_buff *skb)
-{
-}
-
-static inline int iwl3945_brightness_to_idx(enum led_brightness brightness)
-{
-	return fls(0x000000FF & (u32)brightness);
-}
 
 /* Send led command */
-static int iwl_send_led_cmd(struct iwl_priv *priv,
-			    struct iwl_led_cmd *led_cmd)
+static int iwl3945_send_led_cmd(struct iwl_priv *priv,
+				struct iwl_led_cmd *led_cmd)
 {
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_LEDS_CMD,
 		.len = sizeof(struct iwl_led_cmd),
 		.data = led_cmd,
 		.flags = CMD_ASYNC,
-		.callback = iwl3945_led_cmd_callback,
+		.callback = NULL,
 	};
 
 	return iwl_send_cmd(priv, &cmd);
 }
 
-
-
-/* Set led on command */
-static int iwl3945_led_pattern(struct iwl_priv *priv, int led_id,
-			       unsigned int idx)
-{
-	struct iwl_led_cmd led_cmd = {
-		.id = led_id,
-		.interval = IWL_DEF_LED_INTRVL
-	};
-
-	BUG_ON(idx > IWL_MAX_BLINK_TBL);
-
-	led_cmd.on = blink_tbl[idx].on_time;
-	led_cmd.off = blink_tbl[idx].off_time;
-
-	return iwl_send_led_cmd(priv, &led_cmd);
-}
-
-
 /* Set led on command */
-static int iwl3945_led_on(struct iwl_priv *priv, int led_id)
+static int iwl3945_led_on(struct iwl_priv *priv)
 {
 	struct iwl_led_cmd led_cmd = {
-		.id = led_id,
+		.id = IWL_LED_LINK,
 		.on = IWL_LED_SOLID,
 		.off = 0,
 		.interval = IWL_DEF_LED_INTRVL
 	};
-	return iwl_send_led_cmd(priv, &led_cmd);
+	return iwl3945_send_led_cmd(priv, &led_cmd);
 }
 
 /* Set led off command */
-static int iwl3945_led_off(struct iwl_priv *priv, int led_id)
+static int iwl3945_led_off(struct iwl_priv *priv)
 {
 	struct iwl_led_cmd led_cmd = {
-		.id = led_id,
+		.id = IWL_LED_LINK,
 		.on = 0,
 		.off = 0,
 		.interval = IWL_DEF_LED_INTRVL
 	};
-	IWL_DEBUG_LED(priv, "led off %d\n", led_id);
-	return iwl_send_led_cmd(priv, &led_cmd);
+	IWL_DEBUG_LED(priv, "led off\n");
+	return iwl3945_send_led_cmd(priv, &led_cmd);
 }
 
-/*
- *  Set led on in case of association
- *  */
-static int iwl3945_led_associate(struct iwl_priv *priv, int led_id)
-{
-	IWL_DEBUG_LED(priv, "Associated\n");
-
-	priv->allow_blinking = 1;
-	return iwl3945_led_on(priv, led_id);
-}
-/* Set Led off in case of disassociation */
-static int iwl3945_led_disassociate(struct iwl_priv *priv, int led_id)
-{
-	IWL_DEBUG_LED(priv, "Disassociated\n");
-
-	priv->allow_blinking = 0;
-
-	return 0;
-}
-
-/*
- * brightness call back function for Tx/Rx LED
- */
-static int iwl3945_led_associated(struct iwl_priv *priv, int led_id)
-{
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    !test_bit(STATUS_READY, &priv->status))
-		return 0;
-
-
-	/* start counting Tx/Rx bytes */
-	if (!priv->last_blink_time && priv->allow_blinking)
-		priv->last_blink_time = jiffies;
-	return 0;
-}
-
-/*
- * brightness call back for association and radio
- */
-static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
-				enum led_brightness brightness)
-{
-	struct iwl_led *led = container_of(led_cdev,
-					   struct iwl_led, led_dev);
-	struct iwl_priv *priv = led->priv;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
-			led_type_str[led->type], brightness);
-
-	switch (brightness) {
-	case LED_FULL:
-		if (led->led_on)
-			led->led_on(priv, IWL_LED_LINK);
-		break;
-	case LED_OFF:
-		if (led->led_off)
-			led->led_off(priv, IWL_LED_LINK);
-		break;
-	default:
-		if (led->led_pattern) {
-			int idx = iwl3945_brightness_to_idx(brightness);
-			led->led_pattern(priv, IWL_LED_LINK, idx);
-		}
-		break;
-	}
-}
-
-/*
- * Register led class with the system
- */
-static int iwl3945_led_register_led(struct iwl_priv *priv,
-				   struct iwl_led *led,
-				   enum led_type type, u8 set_led,
-				   char *trigger)
-{
-	struct device *device = wiphy_dev(priv->hw->wiphy);
-	int ret;
-
-	led->led_dev.name = led->name;
-	led->led_dev.brightness_set = iwl3945_led_brightness_set;
-	led->led_dev.default_trigger = trigger;
-
-	led->priv = priv;
-	led->type = type;
-
-	ret = led_classdev_register(device, &led->led_dev);
-	if (ret) {
-		IWL_ERR(priv, "Error: failed to register led handler.\n");
-		return ret;
-	}
-
-	led->registered = 1;
-
-	if (set_led && led->led_on)
-		led->led_on(priv, IWL_LED_LINK);
-	return 0;
-}
-
-
-/*
- * calculate blink rate according to last 2 sec Tx/Rx activities
- */
-static inline u8 get_blink_rate(struct iwl_priv *priv)
-{
-	int index;
-	s64 tpt = priv->rxtxpackets;
-
-	if (tpt < 0)
-		tpt = -tpt;
-
-	IWL_DEBUG_LED(priv, "tpt %lld \n", (long long)tpt);
-
-	if (!priv->allow_blinking)
-		index = IWL_MAX_BLINK_TBL;
-	else
-		for (index = 0; index < IWL_MAX_BLINK_TBL; index++)
-			if (tpt > (blink_tbl[index].brightness * IWL_1MB_RATE))
-				break;
-
-	IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", index);
-	return index;
-}
-
-/*
- * this function called from handler. Since setting Led command can
- * happen very frequent we postpone led command to be called from
- * REPLY handler so we know ucode is up
- */
-void iwl3945_led_background(struct iwl_priv *priv)
-{
-	u8 blink_idx;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-		priv->last_blink_time = 0;
-		return;
-	}
-	if (iwl_is_rfkill(priv)) {
-		priv->last_blink_time = 0;
-		return;
-	}
-
-	if (!priv->allow_blinking) {
-		priv->last_blink_time = 0;
-		if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
-			priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
-			iwl3945_led_pattern(priv, IWL_LED_LINK,
-					    IWL_SOLID_BLINK_IDX);
-		}
-		return;
-	}
-	if (!priv->last_blink_time ||
-	    !time_after(jiffies, priv->last_blink_time +
-			msecs_to_jiffies(1000)))
-		return;
-
-	blink_idx = get_blink_rate(priv);
-
-	/* call only if blink rate change */
-	if (blink_idx != priv->last_blink_rate)
-		iwl3945_led_pattern(priv, IWL_LED_LINK, blink_idx);
-
-	priv->last_blink_time = jiffies;
-	priv->last_blink_rate = blink_idx;
-	priv->rxtxpackets = 0;
-}
-
-
-/* Register all led handler */
-int iwl3945_led_register(struct iwl_priv *priv)
-{
-	char *trigger;
-	int ret;
-
-	priv->last_blink_rate = 0;
-	priv->rxtxpackets = 0;
-	priv->led_tpt = 0;
-	priv->last_blink_time = 0;
-	priv->allow_blinking = 0;
-
-	trigger = ieee80211_get_radio_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_RADIO].name,
-		 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
-		 wiphy_name(priv->hw->wiphy));
-
-	priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
-	priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off;
-	priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
-
-	ret = iwl3945_led_register_led(priv,
-				   &priv->led[IWL_LED_TRG_RADIO],
-				   IWL_LED_TRG_RADIO, 1, trigger);
-
-	if (ret)
-		goto exit_fail;
-
-	trigger = ieee80211_get_assoc_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
-		 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
-		 wiphy_name(priv->hw->wiphy));
-
-	ret = iwl3945_led_register_led(priv,
-				   &priv->led[IWL_LED_TRG_ASSOC],
-				   IWL_LED_TRG_ASSOC, 0, trigger);
-
-	/* for assoc always turn led on */
-	priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_associate;
-	priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_disassociate;
-	priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
-
-	if (ret)
-		goto exit_fail;
-
-	trigger = ieee80211_get_rx_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_RX].name,
-		 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
-		 wiphy_name(priv->hw->wiphy));
-
-	ret = iwl3945_led_register_led(priv,
-				   &priv->led[IWL_LED_TRG_RX],
-				   IWL_LED_TRG_RX, 0, trigger);
-
-	priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
-	priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
-	priv->led[IWL_LED_TRG_RX].led_pattern = iwl3945_led_pattern;
-
-	if (ret)
-		goto exit_fail;
-
-	trigger = ieee80211_get_tx_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_TX].name,
-		 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
-		 wiphy_name(priv->hw->wiphy));
-
-	ret = iwl3945_led_register_led(priv,
-				   &priv->led[IWL_LED_TRG_TX],
-				   IWL_LED_TRG_TX, 0, trigger);
-
-	priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
-	priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
-	priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
-
-	if (ret)
-		goto exit_fail;
-
-	return 0;
-
-exit_fail:
-	iwl3945_led_unregister(priv);
-	return ret;
-}
-
-
-/* unregister led class */
-static void iwl3945_led_unregister_led(struct iwl_led *led, u8 set_led)
-{
-	if (!led->registered)
-		return;
-
-	led_classdev_unregister(&led->led_dev);
-
-	if (set_led)
-		led->led_dev.brightness_set(&led->led_dev, LED_OFF);
-	led->registered = 0;
-}
-
-/* Unregister all led handlers */
-void iwl3945_led_unregister(struct iwl_priv *priv)
-{
-	iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
-	iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
-	iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
-	iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
-}
-
-#endif
+const struct iwl_led_ops iwl3945_led_ops = {
+	.cmd = iwl3945_send_led_cmd,
+	.on = iwl3945_led_on,
+	.off = iwl3945_led_off,
+};
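
The LED rework above strips out the per-trigger led_classdev machinery and leaves each chip family exporting a small ops table that the shared iwl-led code calls through. The same function-pointer pattern, reduced to a hedged standalone C example with invented names:

#include <stdio.h>

/* Invented ops table mirroring the on/off shape used above. */
struct demo_led_ops {
	int (*on)(void *priv);
	int (*off)(void *priv);
};

static int demo_led_on(void *priv)  { (void)priv; printf("LED on\n");  return 0; }
static int demo_led_off(void *priv) { (void)priv; printf("LED off\n"); return 0; }

static const struct demo_led_ops demo_ops = {
	.on  = demo_led_on,
	.off = demo_led_off,
};

/* Shared code sees only the ops pointer, never the chip-specific functions. */
static void demo_set_link_led(const struct demo_led_ops *ops, void *priv, int up)
{
	(up ? ops->on : ops->off)(priv);
}

int main(void)
{
	demo_set_link_led(&demo_ops, NULL, 1);
	demo_set_link_led(&demo_ops, NULL, 0);
	return 0;
}
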
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 3b65642258ca..5a1033ca7aaa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -24,23 +24,9 @@
  *
  *****************************************************************************/
 
-#ifndef IWL3945_LEDS_H
-#define IWL3945_LEDS_H
+#ifndef __iwl_3945_led_h__
+#define __iwl_3945_led_h__
 
-struct iwl_priv;
+extern const struct iwl_led_ops iwl3945_led_ops;
 
-#ifdef CONFIG_IWLWIFI_LEDS
-
-#include "iwl-led.h"
-
-extern int iwl3945_led_register(struct iwl_priv *priv);
-extern void iwl3945_led_unregister(struct iwl_priv *priv);
-extern void iwl3945_led_background(struct iwl_priv *priv);
-
-#else
-static inline int iwl3945_led_register(struct iwl_priv *priv) { return 0; }
-static inline void iwl3945_led_unregister(struct iwl_priv *priv) {}
-static inline void iwl3945_led_background(struct iwl_priv *priv) {}
-
-#endif /* IWLWIFI_LEDS*/
-#endif /* IWL3945_LEDS_H */
+#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index cbb0585083a9..d4b49883b30e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -42,38 +42,6 @@
 
 #define RS_NAME "iwl-3945-rs"
 
-struct iwl3945_rate_scale_data {
-	u64 data;
-	s32 success_counter;
-	s32 success_ratio;
-	s32 counter;
-	s32 average_tpt;
-	unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
-	spinlock_t lock;
-	struct iwl_priv *priv;
-	s32 *expected_tpt;
-	unsigned long last_partial_flush;
-	unsigned long last_flush;
-	u32 flush_time;
-	u32 last_tx_packets;
-	u32 tx_packets;
-	u8 tgg;
-	u8 flush_pending;
-	u8 start_rate;
-	u8 ibss_sta_added;
-	struct timer_list rate_scale_flush;
-	struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
-	struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
-	/* used to be in sta_info */
-	int last_txrate_idx;
-};
-
 static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
 	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
 };
@@ -370,6 +338,28 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
 
 	IWL_DEBUG_RATE(priv, "enter\n");
 
+	spin_lock_init(&rs_sta->lock);
+
+	rs_sta->priv = priv;
+
+	rs_sta->start_rate = IWL_RATE_INVALID;
+
+	/* default to just 802.11b */
+	rs_sta->expected_tpt = iwl3945_expected_tpt_b;
+
+	rs_sta->last_partial_flush = jiffies;
+	rs_sta->last_flush = jiffies;
+	rs_sta->flush_time = IWL_RATE_FLUSH;
+	rs_sta->last_tx_packets = 0;
+	rs_sta->ibss_sta_added = 0;
+
+	init_timer(&rs_sta->rate_scale_flush);
+	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+	rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
+
+	for (i = 0; i < IWL_RATE_COUNT_3945; i++)
+		iwl3945_clear_window(&rs_sta->win[i]);
+
 	/* TODO: what is a good starting rate for STA? About middle? Maybe not
 	 * the lowest or the highest rate.. Could consider using RSSI from
 	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -409,45 +399,11 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
 {
 	struct iwl3945_rs_sta *rs_sta;
 	struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
-	struct iwl_priv *priv = iwl_priv;
-	int i;
-
-	/*
-	 * XXX: If it's using sta->drv_priv anyway, it might
-	 *	as well just put all the information there.
-	 */
+	struct iwl_priv *priv __maybe_unused = iwl_priv;
 
 	IWL_DEBUG_RATE(priv, "enter\n");
 
-	rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp);
-	if (!rs_sta) {
-		IWL_DEBUG_RATE(priv, "leave: ENOMEM\n");
-		return NULL;
-	}
-
-	psta->rs_sta = rs_sta;
-
-	spin_lock_init(&rs_sta->lock);
-
-	rs_sta->priv = priv;
-
-	rs_sta->start_rate = IWL_RATE_INVALID;
-
-	/* default to just 802.11b */
-	rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
-	rs_sta->last_partial_flush = jiffies;
-	rs_sta->last_flush = jiffies;
-	rs_sta->flush_time = IWL_RATE_FLUSH;
-	rs_sta->last_tx_packets = 0;
-	rs_sta->ibss_sta_added = 0;
-
-	init_timer(&rs_sta->rate_scale_flush);
-	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
-	rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush;
-
-	for (i = 0; i < IWL_RATE_COUNT_3945; i++)
-		iwl3945_clear_window(&rs_sta->win[i]);
+	rs_sta = &psta->rs_sta;
 
 	IWL_DEBUG_RATE(priv, "leave\n");
 
@@ -458,14 +414,11 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
 			void *priv_sta)
 {
 	struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
-	struct iwl3945_rs_sta *rs_sta = priv_sta;
+	struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
 	struct iwl_priv *priv __maybe_unused = rs_sta->priv;
 
-	psta->rs_sta = NULL;
-
 	IWL_DEBUG_RATE(priv, "enter\n");
 	del_timer_sync(&rs_sta->rate_scale_flush);
-	kfree(rs_sta);
 	IWL_DEBUG_RATE(priv, "leave\n");
 }
 
@@ -960,14 +913,15 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
 
 	rcu_read_lock();
 
-	sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr);
+	sta = ieee80211_find_sta(priv->vif,
+				 priv->stations[sta_id].sta.sta.addr);
 	if (!sta) {
 		rcu_read_unlock();
 		return;
 	}
 
 	psta = (void *) sta->drv_priv;
-	rs_sta = psta->rs_sta;
+	rs_sta = &psta->rs_sta;
 
 	spin_lock_irqsave(&rs_sta->lock, flags);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index f059b49dc691..7da1dab933d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -47,7 +47,8 @@
 #include "iwl-eeprom.h"
 #include "iwl-helpers.h"
 #include "iwl-core.h"
-#include "iwl-agn-rs.h"
+#include "iwl-led.h"
+#include "iwl-3945-led.h"
 
 #define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
 	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
 static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
 			    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -353,16 +354,12 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 		struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(struct iwl3945_notif_statistics),
 		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
 
 	memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
-
-	iwl3945_led_background(priv);
-
-	priv->last_statistics_time = jiffies;
 }
 
 /******************************************************************************
@@ -545,14 +542,18 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
 				   struct iwl_rx_mem_buffer *rxb,
 				   struct ieee80211_rx_status *stats)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
 	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
 	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-	short len = le16_to_cpu(rx_hdr->len);
+	u16 len = le16_to_cpu(rx_hdr->len);
+	struct sk_buff *skb;
+	int ret;
+	__le16 fc = hdr->frame_control;
 
 	/* We received data from the HW, so stop the watchdog */
-	if (unlikely((len + IWL39_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
+	if (unlikely(len + IWL39_RX_FRAME_SIZE >
+		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
 		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
 		return;
 	}
@@ -564,24 +565,50 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
 		return;
 	}
 
-	skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt);
-	/* Set the size of the skb to the size of the frame */
-	skb_put(rxb->skb, le16_to_cpu(rx_hdr->len));
+	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
 
 	if (!iwl3945_mod_params.sw_crypto)
 		iwl_set_decrypted_flag(priv,
-				       (struct ieee80211_hdr *)rxb->skb->data,
+				       (struct ieee80211_hdr *)rxb_addr(rxb),
 				       le32_to_cpu(rx_end->status), stats);
 
-#ifdef CONFIG_IWLWIFI_LEDS
-	if (ieee80211_is_data(hdr->frame_control))
-		priv->rxtxpackets += len;
-#endif
-	iwl_update_stats(priv, false, hdr->frame_control, len);
+	skb_reserve(skb, IWL_LINK_HDR_MAX);
+	skb_add_rx_frag(skb, 0, rxb->page,
+			(void *)rx_hdr->payload - (void *)pkt, len);
+
+	/* mac80211 currently doesn't support paged SKBs. Convert to a
+	 * linear SKB for management frames and for data frames that need
+	 * software decryption or software defragmentation. */
+	if (ieee80211_is_mgmt(fc) ||
+	    ieee80211_has_protected(fc) ||
+	    ieee80211_has_morefrags(fc) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}
 
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	rxb->skb = NULL;
+	/*
+	 * XXX: We cannot touch the page and its virtual memory (pkt) after
+	 * here. It might have already been freed by the above skb change.
+	 */
+
+	iwl_update_stats(priv, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(priv->hw, skb);
+ out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }
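
The RX rework above hands mac80211 a freshly allocated skb that carries the received page as a fragment, linearizing only when software crypto or defragmentation needs contiguous data. A hedged standalone sketch of that attach-then-maybe-linearize step (demo_wrap_rx_page and the 64/128-byte sizes are illustrative, not driver values):

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* Hypothetical helper: put one received page fragment behind a small skb. */
static struct sk_buff *demo_wrap_rx_page(struct page *page, unsigned int offset,
					 unsigned int len, bool needs_linear)
{
	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
	int ret;

	if (!skb)
		return NULL;

	skb_reserve(skb, 64);				/* headroom only */
	skb_add_rx_frag(skb, 0, page, offset, len);	/* frame stays paged */

	if (needs_linear)
		ret = skb_linearize(skb);		/* copy into linear area */
	else
		ret = __pskb_pull_tail(skb, min_t(unsigned int, 64, len)) ?
			0 : -ENOMEM;			/* pull just the header */

	if (ret) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}
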
 
 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
@@ -591,7 +618,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
 	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
 	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
@@ -791,29 +818,31 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
 	u8 data_retry_limit;
 	__le32 tx_flags;
 	__le16 fc = hdr->frame_control;
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 
 	rate = iwl3945_rates[rate_index].plcp;
-	tx_flags = tx->tx_flags;
+	tx_flags = tx_cmd->tx_flags;
 
 	/* We need to figure out how to get the sta->supp_rates while
 	 * in this running context */
 	rate_mask = IWL_RATES_MASK;
 
+
+	/* Set retry limit on DATA packets and Probe Responses*/
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IWL_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+
 	if (tx_id >= IWL_CMD_QUEUE_NUM)
 		rts_retry_limit = 3;
 	else
 		rts_retry_limit = 7;
 
-	if (ieee80211_is_probe_resp(fc)) {
-		data_retry_limit = 3;
-		if (data_retry_limit < rts_retry_limit)
-			rts_retry_limit = data_retry_limit;
-	} else
-		data_retry_limit = IWL_DEFAULT_TX_RETRY;
-
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
+	if (data_retry_limit < rts_retry_limit)
+		rts_retry_limit = data_retry_limit;
+	tx_cmd->rts_retry_limit = rts_retry_limit;
 
 	if (ieee80211_is_mgmt(fc)) {
 		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
@@ -831,22 +860,20 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
 		}
 	}
 
-	tx->rts_retry_limit = rts_retry_limit;
-	tx->data_retry_limit = data_retry_limit;
-	tx->rate = rate;
-	tx->tx_flags = tx_flags;
+	tx_cmd->rate = rate;
+	tx_cmd->tx_flags = tx_flags;
 
 	/* OFDM */
-	tx->supp_rates[0] =
+	tx_cmd->supp_rates[0] =
 	   ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
 
 	/* CCK */
-	tx->supp_rates[1] = (rate_mask & 0xF);
+	tx_cmd->supp_rates[1] = (rate_mask & 0xF);
 
 	IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
 		       "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
-		       tx->rate, le32_to_cpu(tx->tx_flags),
-		       tx->supp_rates[1], tx->supp_rates[0]);
+		       tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
+		       tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
 }
 
 u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
@@ -962,6 +989,11 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
 
 	iwl3945_hw_txq_ctx_free(priv);
 
+	/* allocate tx queue structure */
+	rc = iwl_alloc_txq_mem(priv);
+	if (rc)
+		return rc;
+
 	/* Tx CMD queue */
 	rc = iwl3945_tx_reset(priv);
 	if (rc)
@@ -986,41 +1018,25 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
 	return rc;
 }
 
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
 static int iwl3945_apm_init(struct iwl_priv *priv)
 {
-	int ret;
-
-	iwl_power_initialize(priv);
-
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+	int ret = iwl_apm_init(priv);
 
-	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* set "initialization complete" bit to move adapter
-	* D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
-			    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
-						APMG_CLK_VAL_BSM_CLK_RQT);
-
-	udelay(20);
+	/* Clear APMG (NIC's internal power management) interrupts */
+	iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+	iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
 
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+	/* Reset radio chip */
+	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+	udelay(5);
+	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
 
-out:
 	return ret;
 }
 
@@ -1145,12 +1161,16 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
+	if (priv->txq)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
 
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 
 void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1159,6 +1179,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
 
 	/* stop SCD */
 	iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
+	iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
 
 	/* reset TFD queues */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
@@ -1171,85 +1192,6 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
 	iwl3945_hw_txq_ctx_free(priv);
 }
 
-static int iwl3945_apm_stop_master(struct iwl_priv *priv)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* set stop master bit */
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
-	iwl_poll_direct_bit(priv, CSR_RESET,
-			    CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-
-	if (ret < 0)
-		goto out;
-
-out:
-	spin_unlock_irqrestore(&priv->lock, flags);
-	IWL_DEBUG_INFO(priv, "stop master\n");
-
-	return ret;
-}
-
-static void iwl3945_apm_stop(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	iwl3945_apm_stop_master(priv);
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-	/* clear "init complete"  move adapter D0A* --> D0U state */
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static int iwl3945_apm_reset(struct iwl_priv *priv)
-{
-	iwl3945_apm_stop_master(priv);
-
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-	udelay(10);
-
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
-			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-
-	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
-				APMG_CLK_VAL_BSM_CLK_RQT);
-
-	iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-	iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
-					0xFFFFFFFF);
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_EN_REG,
-				APMG_CLK_VAL_DMA_CLK_RQT |
-				APMG_CLK_VAL_BSM_CLK_RQT);
-	udelay(10);
-
-	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-	udelay(5);
-	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-
-	/* Clear the 'host command active' bit... */
-	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
-	wake_up_interruptible(&priv->wait_command_queue);
-
-	return 0;
-}
-
 /**
  * iwl3945_hw_reg_adjust_power_by_temp
  * return index delta into power gain settings table
@@ -1858,7 +1800,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
 static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
 {
 	int rc = 0;
-	struct iwl_rx_packet *res = NULL;
+	struct iwl_rx_packet *pkt;
 	struct iwl3945_rxon_assoc_cmd rxon_assoc;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_RXON_ASSOC,
@@ -1887,14 +1829,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
 	if (rc)
 		return rc;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
 		rc = -EIO;
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return rc;
 }
@@ -2042,12 +1984,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
 	return 0;
 }
 
-/* will add 3945 channel switch cmd handling later */
-int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
-{
-	return 0;
-}
-
 /**
  * iwl3945_reg_txpower_periodic -  called when time to check our temperature.
  *
@@ -2557,11 +2493,10 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
 	}
 
 	/* Assign number of Usable TX queues */
-	priv->hw_params.max_txq_num = IWL39_NUM_QUEUES;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 
 	priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
-	priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K;
-	priv->hw_params.max_pkt_size = 2342;
+	priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
 	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
 	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
 	priv->hw_params.max_stations = IWL3945_STATION_COUNT;
@@ -2844,8 +2779,7 @@ static struct iwl_lib_ops iwl3945_lib = {
 	.dump_nic_error_log = iwl3945_dump_nic_error_log,
 	.apm_ops = {
 		.init = iwl3945_apm_init,
-		.reset = iwl3945_apm_reset,
-		.stop = iwl3945_apm_stop,
+		.stop = iwl_apm_stop,
 		.config = iwl3945_nic_config,
 		.set_pwr_src = iwl3945_set_pwr_src,
 	},
@@ -2874,6 +2808,7 @@ static struct iwl_lib_ops iwl3945_lib = {
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
 	.get_hcmd_size = iwl3945_get_hcmd_size,
 	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
+	.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
 };
 
 static struct iwl_ops iwl3945_ops = {
@@ -2881,6 +2816,7 @@ static struct iwl_ops iwl3945_ops = {
 	.lib = &iwl3945_lib,
 	.hcmd = &iwl3945_hcmd,
 	.utils = &iwl3945_hcmd_utils,
+	.led = &iwl3945_led_ops,
 };
 
 static struct iwl_cfg iwl3945_bg_cfg = {
@@ -2892,9 +2828,14 @@ static struct iwl_cfg iwl3945_bg_cfg = {
 	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
 	.ops = &iwl3945_ops,
+	.num_of_queues = IWL39_NUM_QUEUES,
 	.mod_params = &iwl3945_mod_params,
+	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = true,
 	.use_isr_legacy = true,
 	.ht_greenfield_support = false,
+	.led_compensation = 64,
 };
 
 static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2906,9 +2847,11 @@ static struct iwl_cfg iwl3945_abg_cfg = {
 	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
 	.ops = &iwl3945_ops,
+	.num_of_queues = IWL39_NUM_QUEUES,
 	.mod_params = &iwl3945_mod_params,
 	.use_isr_legacy = true,
 	.ht_greenfield_support = false,
+	.led_compensation = 64,
 };
 
 struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 21679bf3a1aa..ecc23ec1f6a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -46,7 +46,7 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
 #include "iwl-debug.h"
 #include "iwl-power.h"
 #include "iwl-dev.h"
-#include "iwl-3945-led.h"
+#include "iwl-led.h"
 
 /* Highest firmware API version supported */
 #define IWL3945_UCODE_API_MAX 2
@@ -74,8 +74,41 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
 /* Module parameters accessible from iwl-*.c */
 extern struct iwl_mod_params iwl3945_mod_params;
 
+struct iwl3945_rate_scale_data {
+	u64 data;
+	s32 success_counter;
+	s32 success_ratio;
+	s32 counter;
+	s32 average_tpt;
+	unsigned long stamp;
+};
+
+struct iwl3945_rs_sta {
+	spinlock_t lock;
+	struct iwl_priv *priv;
+	s32 *expected_tpt;
+	unsigned long last_partial_flush;
+	unsigned long last_flush;
+	u32 flush_time;
+	u32 last_tx_packets;
+	u32 tx_packets;
+	u8 tgg;
+	u8 flush_pending;
+	u8 start_rate;
+	u8 ibss_sta_added;
+	struct timer_list rate_scale_flush;
+	struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+};
+
+
 struct iwl3945_sta_priv {
-	struct iwl3945_rs_sta *rs_sta;
+	struct iwl3945_rs_sta rs_sta;
 };
 
 enum iwl3945_antenna {
@@ -130,12 +163,6 @@ struct iwl3945_frame {
 #define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
 #define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 #define SUP_RATE_11A_MAX_NUM_CHANNELS  8
 #define SUP_RATE_11B_MAX_NUM_CHANNELS  4
 #define SUP_RATE_11G_MAX_NUM_CHANNELS  12
@@ -194,22 +221,13 @@ struct iwl3945_ibss_seq {
  * for use by iwl-*.c
  *
  *****************************************************************************/
-extern int iwl3945_power_init_handle(struct iwl_priv *priv);
-extern int iwl3945_eeprom_init(struct iwl_priv *priv);
 extern int iwl3945_calc_db_from_ratio(int sig_ratio);
 extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
-extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
-			     struct iwl_tx_queue *txq, int count, u32 id);
 extern void iwl3945_rx_replenish(void *data);
 extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
-			    const void *data);
-extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
-					 struct iwl_host_cmd *cmd);
 extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
 					struct ieee80211_hdr *hdr,int left);
-extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv);
+extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
 extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
 
 /*
@@ -280,8 +298,6 @@ extern void iwl3945_config_ap(struct iwl_priv *priv);
  */
 extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
 
-extern int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel);
-
 /*
  * Forward declare iwl-3945.c functions for iwl-base.c
  */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index b34322a32458..c606366b582c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -76,12 +76,9 @@
 
 /*
  * uCode queue management definitions ...
- * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
  * The first queue used for block-ack aggregation is #7 (4965 only).
  * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
  */
-#define IWL_CMD_QUEUE_NUM       4
-#define IWL_CMD_FIFO_NUM        4
 #define IWL49_FIRST_AMPDU_QUEUE	7
 
 /* Time constants */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 6f703a041847..386513b601f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -45,6 +45,7 @@
 #include "iwl-helpers.h"
 #include "iwl-calib.h"
 #include "iwl-sta.h"
+#include "iwl-agn-led.h"
 
 static int iwl4965_send_tx_power(struct iwl_priv *priv);
 static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -62,8 +63,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
 
 /* module parameters */
 static struct iwl_mod_params iwl4965_mod_params = {
-	.num_of_queues = IWL49_NUM_QUEUES,
-	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
 	.amsdu_size_8K = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -319,63 +318,13 @@ static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
 	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
 }
 
-static int iwl4965_apm_init(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
-	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* set "initialization complete" bit to move adapter
-	 * D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/* wait for clock stabilization */
-	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
-						APMG_CLK_VAL_BSM_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
-	return ret;
-}
-
-
 static void iwl4965_nic_config(struct iwl_priv *priv)
 {
 	unsigned long flags;
 	u16 radio_cfg;
-	u16 lctl;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	lctl = iwl_pcie_link_ctl(priv);
-
-	/* HW bug W/A - negligible power consumption */
-	/* L1-ASPM is enabled by BIOS */
-	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
-		/* L1-ASPM enabled: disable L0S  */
-		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-	else
-		/* L1-ASPM disabled: enable L0S */
-		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-
 	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
 	/* write radio config values to register */
@@ -396,79 +345,6 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static int iwl4965_apm_stop_master(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* set stop master bit */
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
-	iwl_poll_direct_bit(priv, CSR_RESET,
-			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-	IWL_DEBUG_INFO(priv, "stop master\n");
-
-	return 0;
-}
-
-static void iwl4965_apm_stop(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	iwl4965_apm_stop_master(priv);
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-	/* clear "init complete"  move adapter D0A* --> D0U state */
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static int iwl4965_apm_reset(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl4965_apm_stop_master(priv);
-
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-
-	/* FIXME: put here L1A -L0S w/a */
-
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0)
-		goto out;
-
-	udelay(10);
-
-	/* Enable DMA and BSM Clock */
-	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
-					      APMG_CLK_VAL_BSM_CLK_RQT);
-
-	udelay(10);
-
-	/* disable L1A */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-	wake_up_interruptible(&priv->wait_command_queue);
-
-out:
-	return ret;
-}
-
 /* Reset differential Rx gains in NIC to prepare for chain noise calibration.
  * Called after every association, but this runs only once!
  *  ... once chain noise is calibrated the first time, it's good forever.  */
@@ -496,14 +372,15 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
 static void iwl4965_gain_computation(struct iwl_priv *priv,
 		u32 *average_noise,
 		u16 min_average_noise_antenna_i,
-		u32 min_average_noise)
+		u32 min_average_noise,
+		u8 default_chain)
 {
 	int i, ret;
 	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
 
 	data->delta_gain_code[min_average_noise_antenna_i] = 0;
 
-	for (i = 0; i < NUM_RX_CHAINS; i++) {
+	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
 		s32 delta_g = 0;
 
 		if (!(data->disconn_array[i]) &&
@@ -557,18 +434,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
 	data->beacon_count = 0;
 }
 
-static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-			__le32 *tx_flags)
-{
-	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-		*tx_flags |= TX_CMD_FLG_RTS_MSK;
-		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-		*tx_flags |= TX_CMD_FLG_CTS_MSK;
-	}
-}
-
 static void iwl4965_bg_txpower_work(struct work_struct *work)
 {
 	struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -663,7 +528,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
 		iwl_write_targ_mem(priv, a, 0);
 	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
 		iwl_write_targ_mem(priv, a, 0);
-	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
+	for (; a < priv->scd_base_addr +
+	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
 		iwl_write_targ_mem(priv, a, 0);
 
 	/* Tel 4965 where to find Tx byte count tables */
@@ -748,6 +614,10 @@ static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
 
 	.nrg_th_cck = 100,
 	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
@@ -764,19 +634,16 @@ static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
  */
 static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
 {
+	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+	    priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
+		priv->cfg->num_of_queues =
+			priv->cfg->mod_params->num_of_queues;
 
-	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
-	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
-		IWL_ERR(priv,
-			"invalid queues_num, should be between %d and %d\n",
-			IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
-		return -EINVAL;
-	}
-
-	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 	priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
 	priv->hw_params.scd_bc_tbls_size =
-			IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl);
+			priv->cfg->num_of_queues *
+			sizeof(struct iwl4965_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
 	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
@@ -787,10 +654,10 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
 
 	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
 
-	priv->hw_params.tx_chains_num = 2;
-	priv->hw_params.rx_chains_num = 2;
-	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
-	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
+	priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+	priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
 	if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
 		priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
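
The hw_params hunk above no longer rejects an out-of-range num_of_queues module parameter; it simply keeps the per-config default when the value falls outside the supported range. A hedged sketch of that clamp-the-parameter pattern with invented bounds:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_num_queues = 16;	/* invented default and bounds */
module_param(demo_num_queues, int, 0444);
MODULE_PARM_DESC(demo_num_queues, "number of Tx queues (4..16)");

static int __init demo_init(void)
{
	/* keep the user's value only when it is in range, else fall back */
	if (demo_num_queues < 4 || demo_num_queues > 16)
		demo_num_queues = 16;
	pr_info("using %d Tx queues\n", demo_num_queues);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
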
 
@@ -1567,14 +1434,13 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
 	return ret;
 }
 
-#ifdef IEEE80211_CONF_CHANNEL_SWITCH
 static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
 {
 	int rc;
 	u8 band = 0;
 	bool is_ht40 = false;
 	u8 ctrl_chan_high = 0;
-	struct iwl4965_channel_switch_cmd cmd = { 0 };
+	struct iwl4965_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
 
 	band = priv->band == IEEE80211_BAND_2GHZ;
@@ -1584,19 +1450,22 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
 	is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
 
 	if (is_ht40 &&
-	    (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+	    (priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
 		ctrl_chan_high = 1;
 
 	cmd.band = band;
 	cmd.expect_beacon = 0;
 	cmd.channel = cpu_to_le16(channel);
-	cmd.rxon_flags = priv->active_rxon.flags;
-	cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
+	cmd.rxon_flags = priv->staging_rxon.flags;
+	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
 	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
 	if (ch_info)
 		cmd.expect_beacon = is_channel_radar(ch_info);
-	else
-		cmd.expect_beacon = 1;
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			priv->active_rxon.channel, channel);
+		return -EFAULT;
+	}
 
 	rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
 				      ctrl_chan_high, &cmd.tx_power);
@@ -1605,10 +1474,11 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
 		return rc;
 	}
 
-	rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-	return rc;
+	priv->switch_rxon.channel = cpu_to_le16(channel);
+	priv->switch_rxon.switch_in_progress = true;
+
+	return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 }
-#endif
 
 /**
  * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
@@ -1805,11 +1675,13 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 				   u16 ssn_idx, u8 tx_fifo)
 {
 	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL49_FIRST_AMPDU_QUEUE,
-			IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
+			IWL49_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1870,11 +1742,13 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
 	u16 ra_tid;
 
 	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL49_FIRST_AMPDU_QUEUE,
-			IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
+			IWL49_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1944,8 +1818,9 @@ static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
 	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
 	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
 	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+	addsta->sleep_tx_count = cmd->sleep_tx_count;
 	addsta->reserved1 = cpu_to_le16(0);
-	addsta->reserved2 = cpu_to_le32(0);
+	addsta->reserved2 = cpu_to_le16(0);
 
 	return (u16)sizeof(struct iwl4965_addsta_cmd);
 }
@@ -1991,8 +1866,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
 		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-		info->flags |= iwl_is_tx_success(status) ?
-			IEEE80211_TX_STAT_ACK : 0;
+		info->flags |= iwl_tx_status_to_mac80211(status);
 		iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
 		/* FIXME: code repetition end */
 
@@ -2078,7 +1952,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
 static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -2147,8 +2021,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 		}
 	} else {
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		info->flags |= iwl_is_tx_success(status) ?
-					IEEE80211_TX_STAT_ACK : 0;
+		info->flags |= iwl_tx_status_to_mac80211(status);
 		iwl_hwrate_to_tx_control(priv,
 					le32_to_cpu(tx_resp->rate_n_flags),
 					info);
@@ -2279,7 +2152,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
 	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
 	.chain_noise_reset = iwl4965_chain_noise_reset,
 	.gain_computation = iwl4965_gain_computation,
-	.rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
+	.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
 	.calc_rssi = iwl4965_calc_rssi,
 };
 
@@ -2301,10 +2174,10 @@ static struct iwl_lib_ops iwl4965_lib = {
 	.load_ucode = iwl4965_load_bsm,
 	.dump_nic_event_log = iwl_dump_nic_event_log,
 	.dump_nic_error_log = iwl_dump_nic_error_log,
+	.set_channel_switch = iwl4965_hw_channel_switch,
 	.apm_ops = {
-		.init = iwl4965_apm_init,
-		.reset = iwl4965_apm_reset,
-		.stop = iwl4965_apm_stop,
+		.init = iwl_apm_init,
+		.stop = iwl_apm_stop,
 		.config = iwl4965_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
 	},
@@ -2340,6 +2213,7 @@ static struct iwl_ops iwl4965_ops = {
 	.lib = &iwl4965_lib,
 	.hcmd = &iwl4965_hcmd,
 	.utils = &iwl4965_hcmd_utils,
+	.led = &iwlagn_led_ops,
 };
 
 struct iwl_cfg iwl4965_agn_cfg = {
@@ -2352,30 +2226,41 @@ struct iwl_cfg iwl4965_agn_cfg = {
 	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
 	.ops = &iwl4965_ops,
+	.num_of_queues = IWL49_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl4965_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_ABC,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = true,
 	.use_isr_legacy = true,
 	.ht_greenfield_support = false,
 	.broken_powersave = true,
+	.led_compensation = 61,
+	.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
 /* Module firmware */
 MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
 
-module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
+module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
+module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
 module_param_named(
-	disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
+	disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
 
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
+module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
 MODULE_PARM_DESC(queues_num, "number of hw queues.");
 /* 11n */
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
+module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
 MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
+module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
+		   int, S_IRUGO);
 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
 
-module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
+module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6e6f516ba404..e2f8615c8c9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -43,6 +43,7 @@
 #include "iwl-io.h"
 #include "iwl-sta.h"
 #include "iwl-helpers.h"
+#include "iwl-agn-led.h"
 #include "iwl-5000-hw.h"
 #include "iwl-6000-hw.h"
 
@@ -72,157 +73,18 @@ static const u16 iwl5000_default_queue_to_tx_fifo[] = {
 	IWL_TX_FIFO_HCCA_2
 };
 
-/* FIXME: same implementation as 4965 */
-static int iwl5000_apm_stop_master(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* set stop master bit */
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
-	iwl_poll_direct_bit(priv, CSR_RESET,
-				  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-	IWL_DEBUG_INFO(priv, "stop master\n");
-
-	return 0;
-}
-
-
-int iwl5000_apm_init(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
-	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* Set FH wait threshold to maximum (HW error during stress W/A) */
-	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
-
-	/* enable HAP INTA to move device L1a -> L0s */
-	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
-	if (priv->cfg->need_pll_cfg)
-		iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
-
-	/* set "initialization complete" bit to move adapter
-	 * D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/* wait for clock stabilization */
-	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		return ret;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-	return ret;
-}
-
-/* FIXME: this is identical to 4965 */
-void iwl5000_apm_stop(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	iwl5000_apm_stop_master(priv);
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-
-	/* clear "init complete"  move adapter D0A* --> D0U state */
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-
-int iwl5000_apm_reset(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl5000_apm_stop_master(priv);
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-
-
-	/* FIXME: put here L1A -L0S w/a */
-
-	if (priv->cfg->need_pll_cfg)
-		iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
-
-	/* set "initialization complete" bit to move adapter
-	 * D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/* wait for clock stabilization */
-	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-out:
-
-	return ret;
-}
-
-
-/* NIC configuration for 5000 series and up */
+/* NIC configuration for 5000 series */
 void iwl5000_nic_config(struct iwl_priv *priv)
 {
 	unsigned long flags;
 	u16 radio_cfg;
-	u16 lctl;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	lctl = iwl_pcie_link_ctl(priv);
-
-	/* HW bug W/A */
-	/* L1-ASPM is enabled by BIOS */
-	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
-		/* L1-APSM enabled: disable L0S  */
-		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-	else
-		/* L1-ASPM disabled: enable L0S */
-		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-
 	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
 	/* write radio config values to register */
-	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX)
+	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_RF_CONFIG_TYPE_MAX)
 		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
 			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
 			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
@@ -302,19 +164,22 @@ u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
 static void iwl5000_gain_computation(struct iwl_priv *priv,
 		u32 average_noise[NUM_RX_CHAINS],
 		u16 min_average_noise_antenna_i,
-		u32 min_average_noise)
+		u32 min_average_noise,
+		u8 default_chain)
 {
 	int i;
 	s32 delta_g;
 	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
 
-	/* Find Gain Code for the antennas B and C */
-	for (i = 1; i < NUM_RX_CHAINS; i++) {
+	/*
+	 * Find Gain Code for the chains based on "default chain"
+	 */
+	for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
 		if ((data->disconn_array[i])) {
 			data->delta_gain_code[i] = 0;
 			continue;
 		}
-		delta_g = (1000 * ((s32)average_noise[0] -
+		delta_g = (1000 * ((s32)average_noise[default_chain] -
 			(s32)average_noise[i])) / 1500;
 		/* bound gain by 2 bits value max, 3rd bit is sign */
 		data->delta_gain_code[i] =
@@ -407,6 +272,10 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
 	.auto_corr_max_cck_mrc = 400,
 	.nrg_th_cck = 95,
 	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
@@ -429,6 +298,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
 	.auto_corr_max_cck_mrc = 400,
 	.nrg_th_cck = 95,
 	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -493,7 +366,7 @@ static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
 static void iwl5000_rx_calib_result(struct iwl_priv *priv,
 			     struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
 	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 	int index;
@@ -719,16 +592,6 @@ static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
 		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
 }
 
-static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
-{
-	struct iwl_wimax_coex_cmd coex_cmd;
-
-	memset(&coex_cmd, 0, sizeof(coex_cmd));
-
-	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
-				sizeof(coex_cmd), &coex_cmd);
-}
-
 int iwl5000_alive_notify(struct iwl_priv *priv)
 {
 	u32 a;
@@ -746,7 +609,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
 	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
 		a += 4)
 		iwl_write_targ_mem(priv, a, 0);
-	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
+	for (; a < priv->scd_base_addr +
+	       IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
 		iwl_write_targ_mem(priv, a, 0);
 
 	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
@@ -798,9 +662,13 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
 		iwl_txq_ctx_activate(priv, i);
 		iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
 	}
-	/* TODO - need to initialize those FIFOs inside the loop above,
-	 * not only mark them as active */
-	iwl_txq_ctx_activate(priv, 4);
+
+	/*
+	 * TODO - these queues should be initialized and mapped to FIFOs
+	 * in the loop above, not just marked as active. We do it this way
+	 * because we want the first aggregation queue to be queue #10,
+	 * while queues 8 and 9 are not used for anything else yet.
+	 */
 	iwl_txq_ctx_activate(priv, 7);
 	iwl_txq_ctx_activate(priv, 8);
 	iwl_txq_ctx_activate(priv, 9);
@@ -808,7 +676,7 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 
-	iwl5000_send_wimax_coex(priv);
+	iwl_send_wimax_coex(priv);
 
 	iwl5000_set_Xtal_calib(priv);
 	iwl_send_calib_results(priv);
@@ -818,32 +686,22 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
 
 int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
 {
-	if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
-	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
-		IWL_ERR(priv,
-			"invalid queues_num, should be between %d and %d\n",
-			IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
-		return -EINVAL;
-	}
+	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+	    priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+		priv->cfg->num_of_queues =
+			priv->cfg->mod_params->num_of_queues;
 
-	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 	priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
 	priv->hw_params.scd_bc_tbls_size =
-			IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl);
+			priv->cfg->num_of_queues *
+			sizeof(struct iwl5000_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
 	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
 
-	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
-	case CSR_HW_REV_TYPE_6x00:
-	case CSR_HW_REV_TYPE_6x50:
-		priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
-		priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
-		break;
-	default:
-		priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
-		priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
-	}
+	priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
+	priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
 
 	priv->hw_params.max_bsm_size = 0;
 	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
@@ -989,11 +847,13 @@ int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
 	u16 ra_tid;
 
 	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL50_FIRST_AMPDU_QUEUE,
-			IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
+			IWL50_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1047,11 +907,13 @@ int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 				   u16 ssn_idx, u8 tx_fifo)
 {
 	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_ERR(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL50_FIRST_AMPDU_QUEUE,
-			IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
+			IWL50_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1132,8 +994,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
 		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-		info->flags |= iwl_is_tx_success(status) ?
-					IEEE80211_TX_STAT_ACK : 0;
+		info->flags |= iwl_tx_status_to_mac80211(status);
 		iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
 
 		/* FIXME: code repetition end */
@@ -1218,7 +1079,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
 static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1278,8 +1139,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 		BUG_ON(txq_id != txq->swq_id);
 
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		info->flags |= iwl_is_tx_success(status) ?
-					IEEE80211_TX_STAT_ACK : 0;
+		info->flags |= iwl_tx_status_to_mac80211(status);
 		iwl_hwrate_to_tx_control(priv,
 					le32_to_cpu(tx_resp->rate_n_flags),
 					info);
@@ -1389,6 +1249,22 @@ int  iwl5000_send_tx_power(struct iwl_priv *priv)
 
 	/* half dBm need to multiply */
 	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
+
+	if (priv->tx_power_lmt_in_half_dbm &&
+	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
+		/*
+		 * Newer devices use an enhanced/extended tx power table in
+		 * EEPROM, stored in half-dBm units. The driver converts it
+		 * to dBm before reporting to mac80211, which can lose 1/2 dBm
+		 * of resolution: the driver rounds up, so the reported tx
+		 * power could end up 1/2 dBm above the regulatory limit.
+		 * Check for that here: if "tx_power_user_lmt" is higher
+		 * than the EEPROM value (which is in half-dBm units), lower
+		 * the tx power to the EEPROM limit instead of rounding up
+		 * past it.
+		 */
+		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
+	}
 	tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
 	tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
 
@@ -1459,6 +1335,24 @@ int iwl5000_calc_rssi(struct iwl_priv *priv,
 	return max_rssi - agc - IWL49_RSSI_OFFSET;
 }
 
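+/* Tell uCode which TX antennas to use; needs uCode API >= 2 */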
+static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+{
+	struct iwl_tx_ant_config_cmd tx_ant_cmd = {
+	  .valid = cpu_to_le32(valid_tx_ant),
+	};
+
+	if (IWL_UCODE_API(priv->ucode_ver) > 1) {
+		IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
+		return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
+					sizeof(struct iwl_tx_ant_config_cmd),
+					&tx_ant_cmd);
+	} else {
+		IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+
 #define IWL5000_UCODE_GET(item)						\
 static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
 				    u32 api_ver)			\
@@ -1497,10 +1391,43 @@ IWL5000_UCODE_GET(init_size);
 IWL5000_UCODE_GET(init_data_size);
 IWL5000_UCODE_GET(boot_size);
 
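+/* Send REPLY_CHANNEL_SWITCH and mark the channel switch as pending */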
+static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+{
+	struct iwl5000_channel_switch_cmd cmd;
+	const struct iwl_channel_info *ch_info;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = sizeof(cmd),
+		.flags = CMD_SIZE_HUGE,
+		.data = &cmd,
+	};
+
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		priv->active_rxon.channel, channel);
+	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	cmd.channel = cpu_to_le16(channel);
+	cmd.rxon_flags = priv->staging_rxon.flags;
+	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
+	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, channel);
+	if (ch_info)
+		cmd.expect_beacon = is_channel_radar(ch_info);
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			priv->active_rxon.channel, channel);
+		return -EFAULT;
+	}
+	priv->switch_rxon.channel = cpu_to_le16(channel);
+	priv->switch_rxon.switch_in_progress = true;
+
+	return iwl_send_cmd_sync(priv, &hcmd);
+}
+
 struct iwl_hcmd_ops iwl5000_hcmd = {
 	.rxon_assoc = iwl5000_send_rxon_assoc,
 	.commit_rxon = iwl_commit_rxon,
 	.set_rxon_chain = iwl_set_rxon_chain,
+	.set_tx_ant = iwl5000_send_tx_ant_config,
 };
 
 struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
@@ -1543,10 +1470,10 @@ struct iwl_lib_ops iwl5000_lib = {
 	.alive_notify = iwl5000_alive_notify,
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl5000_hw_channel_switch,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
-		.stop = iwl5000_apm_stop,
+		.init = iwl_apm_init,
+		.stop = iwl_apm_stop,
 		.config = iwl5000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
 	},
@@ -1595,10 +1522,10 @@ static struct iwl_lib_ops iwl5150_lib = {
 	.alive_notify = iwl5000_alive_notify,
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl5000_hw_channel_switch,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
-		.stop = iwl5000_apm_stop,
+		.init = iwl_apm_init,
+		.stop = iwl_apm_stop,
 		.config = iwl5000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
 	},
@@ -1627,11 +1554,12 @@ static struct iwl_lib_ops iwl5150_lib = {
 	 },
 };
 
-struct iwl_ops iwl5000_ops = {
+static struct iwl_ops iwl5000_ops = {
 	.ucode = &iwl5000_ucode,
 	.lib = &iwl5000_lib,
 	.hcmd = &iwl5000_hcmd,
 	.utils = &iwl5000_hcmd_utils,
+	.led = &iwlagn_led_ops,
 };
 
 static struct iwl_ops iwl5150_ops = {
@@ -1639,11 +1567,10 @@ static struct iwl_ops iwl5150_ops = {
 	.lib = &iwl5150_lib,
 	.hcmd = &iwl5000_hcmd,
 	.utils = &iwl5000_hcmd_utils,
+	.led = &iwlagn_led_ops,
 };
 
 struct iwl_mod_params iwl50_mod_params = {
-	.num_of_queues = IWL50_NUM_QUEUES,
-	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.amsdu_size_8K = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -1660,28 +1587,41 @@ struct iwl_cfg iwl5300_agn_cfg = {
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
-struct iwl_cfg iwl5100_bg_cfg = {
-	.name = "5100BG",
+struct iwl_cfg iwl5100_bgn_cfg = {
+	.name = "5100BGN",
 	.fw_name_pre = IWL5000_FW_PRE,
 	.ucode_api_max = IWL5000_UCODE_API_MAX,
 	.ucode_api_min = IWL5000_UCODE_API_MIN,
-	.sku = IWL_SKU_G,
+	.sku = IWL_SKU_G|IWL_SKU_N,
 	.ops = &iwl5000_ops,
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_B,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 };
 
 struct iwl_cfg iwl5100_abg_cfg = {
@@ -1694,11 +1634,16 @@ struct iwl_cfg iwl5100_abg_cfg = {
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_B,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
-	.ht_greenfield_support = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 };
 
 struct iwl_cfg iwl5100_agn_cfg = {
@@ -1711,11 +1656,18 @@ struct iwl_cfg iwl5100_agn_cfg = {
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_B,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
 struct iwl_cfg iwl5350_agn_cfg = {
@@ -1728,11 +1680,18 @@ struct iwl_cfg iwl5350_agn_cfg = {
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
 struct iwl_cfg iwl5150_agn_cfg = {
@@ -1745,24 +1704,54 @@ struct iwl_cfg iwl5150_agn_cfg = {
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_A,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+};
+
+struct iwl_cfg iwl5150_abg_cfg = {
+	.name = "5150ABG",
+	.fw_name_pre = IWL5150_FW_PRE,
+	.ucode_api_max = IWL5150_UCODE_API_MAX,
+	.ucode_api_min = IWL5150_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G,
+	.ops = &iwl5150_ops,
+	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
+	.mod_params = &iwl50_mod_params,
+	.valid_tx_ant = ANT_A,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 };
 
 MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
 
-module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
+module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
 MODULE_PARM_DESC(swcrypto50,
 		  "using software crypto engine (default 0 [hardware])\n");
-module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
+module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
 MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
-module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444);
+module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
 MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
-module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
+module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
+		   int, S_IRUGO);
 MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
-module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444);
+module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 1473452ba22f..74e571049273 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -44,14 +44,16 @@
 #include "iwl-sta.h"
 #include "iwl-helpers.h"
 #include "iwl-5000-hw.h"
+#include "iwl-6000-hw.h"
+#include "iwl-agn-led.h"
 
 /* Highest firmware API version supported */
 #define IWL6000_UCODE_API_MAX 4
 #define IWL6050_UCODE_API_MAX 4
 
 /* Lowest firmware API version supported */
-#define IWL6000_UCODE_API_MIN 1
-#define IWL6050_UCODE_API_MIN 1
+#define IWL6000_UCODE_API_MIN 4
+#define IWL6050_UCODE_API_MIN 4
 
 #define IWL6000_FW_PRE "iwlwifi-6000-"
 #define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -71,14 +73,24 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 /* NIC configuration for 6000 series */
 static void iwl6000_nic_config(struct iwl_priv *priv)
 {
-	iwl5000_nic_config(priv);
+	u16 radio_cfg;
+
+	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+	/* write radio config values to register */
+	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
+		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+	/* set CSR_HW_CONFIG_REG for uCode use */
+	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
 
 	/* no locking required for register write */
-	if (priv->cfg->pa_type == IWL_PA_HYBRID) {
-		/* 2x2 hybrid phy type */
-		iwl_write32(priv, CSR_GP_DRIVER_REG,
-			     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB);
-	} else if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
+	if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
 		/* 2x2 IPA phy type */
 		iwl_write32(priv, CSR_GP_DRIVER_REG,
 			     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
@@ -86,8 +98,109 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
 	/* else do nothing, uCode configured */
 }
 
+static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
+	.min_nrg_cck = 97,
+	.max_nrg_cck = 0, /* not used, set to 0 */
+	.auto_corr_min_ofdm = 80,
+	.auto_corr_min_ofdm_mrc = 128,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 192,
+
+	.auto_corr_max_ofdm = 145,
+	.auto_corr_max_ofdm_mrc = 232,
+	.auto_corr_max_ofdm_x1 = 145,
+	.auto_corr_max_ofdm_mrc_x1 = 232,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 175,
+	.auto_corr_min_cck_mrc = 160,
+	.auto_corr_max_cck_mrc = 310,
+	.nrg_th_cck = 97,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
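+/* Set up hw_params for 6000 series devices */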
+static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+	    priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+		priv->cfg->num_of_queues =
+			priv->cfg->mod_params->num_of_queues;
+
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+	priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+	priv->hw_params.scd_bc_tbls_size =
+			priv->cfg->num_of_queues *
+			sizeof(struct iwl5000_scd_bc_tbl);
+	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+	priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+	priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+	priv->hw_params.max_bsm_size = 0;
+	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
+					BIT(IEEE80211_BAND_5GHZ);
+	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+	priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+	priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+	if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+		priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+	/* Set initial sensitivity parameters */
+	/* Set initial calibration set */
+	priv->hw_params.sens = &iwl6000_sensitivity;
+	priv->hw_params.calib_init_cfg =
+			BIT(IWL_CALIB_XTAL)		|
+			BIT(IWL_CALIB_LO)		|
+			BIT(IWL_CALIB_TX_IQ)		|
+			BIT(IWL_CALIB_BASE_BAND);
+	return 0;
+}
+
+static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+{
+	struct iwl6000_channel_switch_cmd cmd;
+	const struct iwl_channel_info *ch_info;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = sizeof(cmd),
+		.flags = CMD_SIZE_HUGE,
+		.data = &cmd,
+	};
+
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		priv->active_rxon.channel, channel);
+
+	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	cmd.channel = cpu_to_le16(channel);
+	cmd.rxon_flags = priv->staging_rxon.flags;
+	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
+	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, channel);
+	if (ch_info)
+		cmd.expect_beacon = is_channel_radar(ch_info);
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			priv->active_rxon.channel, channel);
+		return -EFAULT;
+	}
+	priv->switch_rxon.channel = cpu_to_le16(channel);
+	priv->switch_rxon.switch_in_progress = true;
+
+	return iwl_send_cmd_sync(priv, &hcmd);
+}
+
 static struct iwl_lib_ops iwl6000_lib = {
-	.set_hw_params = iwl5000_hw_set_hw_params,
+	.set_hw_params = iwl6000_hw_set_hw_params,
 	.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
 	.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
 	.txq_set_sched = iwl5000_txq_set_sched,
@@ -106,10 +219,10 @@ static struct iwl_lib_ops iwl6000_lib = {
 	.alive_notify = iwl5000_alive_notify,
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl6000_hw_channel_switch,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
-		.stop = iwl5000_apm_stop,
+		.init = iwl_apm_init,
+		.stop = iwl_apm_stop,
 		.config = iwl6000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
 	},
@@ -139,25 +252,33 @@ static struct iwl_lib_ops iwl6000_lib = {
 	 },
 };
 
-static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
+static struct iwl_ops iwl6000_ops = {
+	.ucode = &iwl5000_ucode,
+	.lib = &iwl6000_lib,
+	.hcmd = &iwl5000_hcmd,
+	.utils = &iwl5000_hcmd_utils,
+	.led = &iwlagn_led_ops,
+};
+
+static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
 	.get_hcmd_size = iwl5000_get_hcmd_size,
 	.build_addsta_hcmd = iwl5000_build_addsta_hcmd,
 	.rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
 	.calc_rssi = iwl5000_calc_rssi,
 };
 
-static struct iwl_ops iwl6000_ops = {
+static struct iwl_ops iwl6050_ops = {
 	.ucode = &iwl5000_ucode,
 	.lib = &iwl6000_lib,
 	.hcmd = &iwl5000_hcmd,
-	.utils = &iwl6000_hcmd_utils,
+	.utils = &iwl6050_hcmd_utils,
+	.led = &iwlagn_led_ops,
 };
 
-
 /*
- * "h": Hybrid configuration, use both internal and external Power Amplifier
+ * "i": Internal configuration, use internal Power Amplifier
  */
-struct iwl_cfg iwl6000h_2agn_cfg = {
+struct iwl_cfg iwl6000i_2agn_cfg = {
 	.name = "6000 Series 2x2 AGN",
 	.fw_name_pre = IWL6000_FW_PRE,
 	.ucode_api_max = IWL6000_UCODE_API_MAX,
@@ -165,41 +286,85 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
 	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
 	.ops = &iwl6000_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
-	.valid_tx_ant = ANT_AB,
-	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
-	.pa_type = IWL_PA_HYBRID,
+	.valid_tx_ant = ANT_BC,
+	.valid_rx_ant = ANT_BC,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_INTERNAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
 	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
-/*
- * "i": Internal configuration, use internal Power Amplifier
- */
-struct iwl_cfg iwl6000i_2agn_cfg = {
-	.name = "6000 Series 2x2 AGN",
+struct iwl_cfg iwl6000i_2abg_cfg = {
+	.name = "6000 Series 2x2 ABG",
 	.fw_name_pre = IWL6000_FW_PRE,
 	.ucode_api_max = IWL6000_UCODE_API_MAX,
 	.ucode_api_min = IWL6000_UCODE_API_MIN,
-	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+	.sku = IWL_SKU_A|IWL_SKU_G,
 	.ops = &iwl6000_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_BC,
 	.valid_rx_ant = ANT_BC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
 	.pa_type = IWL_PA_INTERNAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+};
+
+struct iwl_cfg iwl6000i_2bg_cfg = {
+	.name = "6000 Series 2x2 BG",
+	.fw_name_pre = IWL6000_FW_PRE,
+	.ucode_api_max = IWL6000_UCODE_API_MAX,
+	.ucode_api_min = IWL6000_UCODE_API_MIN,
+	.sku = IWL_SKU_G,
+	.ops = &iwl6000_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
+	.mod_params = &iwl50_mod_params,
+	.valid_tx_ant = ANT_BC,
+	.valid_rx_ant = ANT_BC,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_INTERNAL,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6050_2agn_cfg = {
@@ -208,61 +373,89 @@ struct iwl_cfg iwl6050_2agn_cfg = {
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-	.ops = &iwl6000_ops,
+	.ops = &iwl6050_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
-	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
 	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
 };
 
-struct iwl_cfg iwl6000_3agn_cfg = {
-	.name = "6000 Series 3x3 AGN",
-	.fw_name_pre = IWL6000_FW_PRE,
-	.ucode_api_max = IWL6000_UCODE_API_MAX,
-	.ucode_api_min = IWL6000_UCODE_API_MIN,
-	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-	.ops = &iwl6000_ops,
+struct iwl_cfg iwl6050_2abg_cfg = {
+	.name = "6050 Series 2x2 ABG",
+	.fw_name_pre = IWL6050_FW_PRE,
+	.ucode_api_max = IWL6050_UCODE_API_MAX,
+	.ucode_api_min = IWL6050_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G,
+	.ops = &iwl6050_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
-	.valid_tx_ant = ANT_ABC,
-	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = false,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
-	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
-struct iwl_cfg iwl6050_3agn_cfg = {
-	.name = "6050 Series 3x3 AGN",
-	.fw_name_pre = IWL6050_FW_PRE,
-	.ucode_api_max = IWL6050_UCODE_API_MAX,
-	.ucode_api_min = IWL6050_UCODE_API_MIN,
+struct iwl_cfg iwl6000_3agn_cfg = {
+	.name = "6000 Series 3x3 AGN",
+	.fw_name_pre = IWL6000_FW_PRE,
+	.ucode_api_max = IWL6000_UCODE_API_MAX,
+	.ucode_api_min = IWL6000_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
 	.ops = &iwl6000_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
+	.led_compensation = 51,
 	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
new file mode 100644
index 000000000000..3bccba20f6da
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -0,0 +1,85 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-led.h"
+
+/* Send led command */
+static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+{
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_LEDS_CMD,
+		.len = sizeof(struct iwl_led_cmd),
+		.data = led_cmd,
+		.flags = CMD_ASYNC,
+		.callback = NULL,
+	};
+	u32 reg;
+
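+	/* Clear any LED register bits outside CSR_LED_BSM_CTRL_MSK */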
+	reg = iwl_read32(priv, CSR_LED_REG);
+	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+		iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+	return iwl_send_cmd(priv, &cmd);
+}
+
+/* Set led register off */
+static int iwl_led_on_reg(struct iwl_priv *priv)
+{
+	IWL_DEBUG_LED(priv, "led on\n");
+	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+	return 0;
+}
+
+/* Set led register off */
+static int iwl_led_off_reg(struct iwl_priv *priv)
+{
+	IWL_DEBUG_LED(priv, "LED Reg off\n");
+	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
+	return 0;
+}
+
+const struct iwl_led_ops iwlagn_led_ops = {
+	.cmd = iwl_send_led_cmd,
+	.on = iwl_led_on_reg,
+	.off = iwl_led_off_reg,
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
new file mode 100644
index 000000000000..ab55f92a161d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_agn_led_h__
+#define __iwl_agn_led_h__
+
+extern const struct iwl_led_ops iwlagn_led_ops;
+
+#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 81726ee32858..fe511cbf012e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -75,104 +75,6 @@ static const u8 ant_toggle_lookup[] = {
 	/*ANT_ABC  -> */ ANT_ABC,
 };
 
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
-	u64 data;		/* bitmap of successful frames */
-	s32 success_counter;	/* number of frames successful */
-	s32 success_ratio;	/* per-cent * 128  */
-	s32 counter;		/* number of frames attempted */
-	s32 average_tpt;	/* success ratio * expected throughput */
-	unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
-	enum iwl_table_type lq_type;
-	u8 ant_type;
-	u8 is_SGI;	/* 1 = short guard interval */
-	u8 is_ht40;	/* 1 = 40 MHz channel width */
-	u8 is_dup;	/* 1 = duplicated data streams */
-	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximun number of tables we can search */
-	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
-	u32 current_rate;  /* rate_n_flags, uCode API format */
-	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
-	unsigned long time_stamp;	/* age of the oldest statistics */
-	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
-						 * slice */
-	u32 total;			/* total num of packets during the
-					 * last TID_MAX_TIME_DIFF */
-	u8 queue_count;			/* number of queues that has
-					 * been used since the last cleanup */
-	u8 head;			/* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
-	u8 active_tbl;		/* index of active table, range 0-1 */
-	u8 enable_counter;	/* indicates HT mode */
-	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
-	u8 search_better_tbl;	/* 1: currently trying alternate mode */
-	s32 last_tpt;
-
-	/* The following determine when to search for a new mode */
-	u32 table_count_limit;
-	u32 max_failure_limit;	/* # failed frames before new search */
-	u32 max_success_limit;	/* # successful frames before new search */
-	u32 table_count;
-	u32 total_failed;	/* total failed frames, any/all rates */
-	u32 total_success;	/* total successful frames, any/all rates */
-	u64 flush_timer;	/* time staying in mode before new search */
-
-	u8 action_counter;	/* # mode-switch actions tried */
-	u8 is_green;
-	u8 is_dup;
-	enum ieee80211_band band;
-	u8 ibss_sta_added;
-
-	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
-	u32 supp_rates;
-	u16 active_legacy_rate;
-	u16 active_siso_rate;
-	u16 active_mimo2_rate;
-	u16 active_mimo3_rate;
-	u16 active_rate_basic;
-	s8 max_rate_idx;     /* Max rate set by user */
-	u8 missed_rate_counter;
-
-	struct iwl_link_quality_cmd lq;
-	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
-	u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
-	struct dentry *rs_sta_dbgfs_scale_table_file;
-	struct dentry *rs_sta_dbgfs_stats_table_file;
-	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
-	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
-	u32 dbg_fixed_rate;
-#endif
-	struct iwl_priv *drv;
-
-	/* used to be in sta_info */
-	int last_txrate_idx;
-	/* last tx rate_n_flags */
-	u32 last_rate_n_flags;
-};
-
 static void rs_rate_scale_perform(struct iwl_priv *priv,
 				   struct sk_buff *skb,
 				   struct ieee80211_sta *sta,
@@ -190,84 +92,78 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
 {}
 #endif
 
-/*
- * Expected throughput metrics for following rates:
- * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- * "G" is the only table that supports CCK (the first 4 rates).
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used in the
+ * G (2.4 GHz) band.
  */
 
-static s32 expected_tpt_A[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
-};
-
-static s32 expected_tpt_G[IWL_RATE_COUNT] = {
-	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186
-};
-
-static s32 expected_tpt_siso20MHz[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 42, 42, 76, 102, 124, 159, 183, 193, 202
-};
-
-static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211
-};
-
-static s32 expected_tpt_mimo2_20MHz[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251
-};
-
-static s32 expected_tpt_mimo2_20MHzSGI[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
 };
 
-static s32 expected_tpt_siso40MHz[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 77, 77, 127, 160, 184, 220, 242, 250, 257
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
+	{0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
+	{0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
+	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+	{0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo2_40MHz[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+	{0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo2_40MHzSGI[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
 };
 
-/* Expected throughput metric MIMO3 */
-static s32 expected_tpt_mimo3_20MHz[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 99, 99, 153, 186, 208, 239, 256, 263, 268
+static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
+	{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
+	{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
+	{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo3_20MHzSGI[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 106, 106, 162, 194, 215, 246, 262, 268, 273
-};
-
-static s32 expected_tpt_mimo3_40MHz[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 152, 152, 211, 239, 255, 279, 290, 294, 297
-};
-
-static s32 expected_tpt_mimo3_40MHzSGI[IWL_RATE_COUNT] = {
-	0, 0, 0, 0, 160, 160, 219, 245, 261, 284, 294, 297, 300
+static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 152, 0, 211, 239, 255, 279,  290,  294,  297}, /* Norm */
+	{0, 0, 0, 0, 160, 0, 219, 245, 261, 284,  294,  297,  300}, /* SGI */
+	{0, 0, 0, 0, 254, 0, 443, 584, 695, 868,  984, 1030, 1070}, /* AGG */
+	{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
 };
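
Each HT table above carries four rows chosen by guard interval and aggregation state; rs_set_expected_tpt_table() further down selects among them with an if/else chain. As a rough sketch (not the driver's code), the row choice reduces to two bits:

/*
 * Illustrative sketch only: row 0 = Norm, 1 = SGI, 2 = AGG, 3 = AGG+SGI,
 * i.e. bit 0 is the short guard interval and bit 1 is aggregation.
 */
static inline int expected_tpt_row(int is_sgi, int is_agg)
{
	return (is_sgi ? 1 : 0) + (is_agg ? 2 : 0);
}
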
 
 /* mbps, mcs */
 const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
-  {"1", ""},
-  {"2", ""},
-  {"5.5", ""},
-  {"11", ""},
-  {"6", "BPSK 1/2"},
-  {"9", "BPSK 1/2"},
-  {"12", "QPSK 1/2"},
-  {"18", "QPSK 3/4"},
-  {"24", "16QAM 1/2"},
-  {"36", "16QAM 3/4"},
-  {"48", "64QAM 2/3"},
-  {"54", "64QAM 3/4"},
-  {"60", "64QAM 5/6"}
+	{  "1", "BPSK DSSS"},
+	{  "2", "QPSK DSSS"},
+	{"5.5", "BPSK CCK"},
+	{ "11", "QPSK CCK"},
+	{  "6", "BPSK 1/2"},
+	{  "9", "BPSK 1/2"},
+	{ "12", "QPSK 1/2"},
+	{ "18", "QPSK 3/4"},
+	{ "24", "16QAM 1/2"},
+	{ "36", "16QAM 3/4"},
+	{ "48", "64QAM 2/3"},
+	{ "54", "64QAM 3/4"},
+	{ "60", "64QAM 5/6"},
 };
 
 #define MCS_INDEX_PER_STREAM	(8)
@@ -405,7 +301,7 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
 	if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
 		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
 				sta->addr, tid);
-		ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
+		ieee80211_start_tx_ba_session(sta, tid);
 	}
 }
 
@@ -444,7 +340,7 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
  * packets.
  */
 static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
-			      int scale_index, s32 tpt, int retries,
+			      int scale_index, s32 tpt, int attempts,
 			      int successes)
 {
 	struct iwl_rate_scale_data *window = NULL;
@@ -454,7 +350,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
 	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
 		return -EINVAL;
 
-	/* Select data for current tx bit rate */
+	/* Select window for current tx bit rate */
 	window = &(windows[scale_index]);
 
 	/*
@@ -465,7 +361,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
 	 * subtract "1" from the success counter (this is the main reason
 	 * we keep these bitmaps!).
 	 */
-	while (retries > 0) {
+	while (attempts > 0) {
 		if (window->counter >= IWL_RATE_MAX_WINDOW) {
 
 			/* remove earliest */
@@ -480,17 +376,17 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
 		/* Increment frames-attempted counter */
 		window->counter++;
 
-		/* Shift bitmap by one frame (throw away oldest history),
-		 * OR in "1", and increment "success" if this
-		 * frame was successful. */
+		/* Shift bitmap by one frame to throw away oldest history */
 		window->data <<= 1;
+
+		/* Mark the most recent #successes attempts as successful */
 		if (successes > 0) {
 			window->success_counter++;
 			window->data |= 0x1;
 			successes--;
 		}
 
-		retries--;
+		attempts--;
 	}
 
 	/* Calculate current success ratio, avoid divide-by-0! */
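
For reference, a stand-alone model of the success-history window being updated above; the window size and names are stand-ins for IWL_RATE_MAX_WINDOW and struct iwl_rate_scale_data, so treat this as an illustration rather than driver code:

#include <stdint.h>

#define WINDOW_MAX 62	/* assumed stand-in for IWL_RATE_MAX_WINDOW */

struct win_model {
	uint64_t data;		/* bit i set => i-th most recent frame ACKed */
	int counter;		/* frames recorded, capped at WINDOW_MAX */
	int success_counter;	/* number of set bits in data */
};

static void win_model_add(struct win_model *w, int acked)
{
	if (w->counter >= WINDOW_MAX) {
		/* remove the earliest frame before recording the new one */
		if (w->data & (1ULL << (WINDOW_MAX - 1)))
			w->success_counter--;
		w->counter--;
	}

	w->counter++;
	w->data <<= 1;		/* oldest bit shifts out of the window */
	if (acked) {
		w->data |= 0x1;
		w->success_counter++;
	}
}
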
@@ -671,7 +567,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
  * there are no non-GF stations present in the BSS.
  */
 static inline u8 rs_use_green(struct ieee80211_sta *sta,
-			      struct iwl_ht_info *ht_conf)
+			      struct iwl_ht_config *ht_conf)
 {
 	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
 		!(ht_conf->non_GF_STA_present);
@@ -821,27 +717,45 @@ out:
 }
 
 /*
+ * Simple function to compare two rate scale table types
+ */
+static bool table_type_matches(struct iwl_scale_tbl_info *a,
+			       struct iwl_scale_tbl_info *b)
+{
+	return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+		(a->is_SGI == b->is_SGI);
+}
+
+/*
+ * Helper to get the expected throughput for a rate index from an
+ * iwl_scale_tbl_info, guarding against a NULL expected_tpt pointer.
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_index];
+	return 0;
+}
+
+/*
  * mac80211 sends us Tx status
  */
 static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 			 struct ieee80211_sta *sta, void *priv_sta,
 			 struct sk_buff *skb)
 {
-	int status;
-	u8 retries;
-	int rs_index, mac_index, index = 0;
+	int legacy_success;
+	int retries;
+	int rs_index, mac_index, i;
 	struct iwl_lq_sta *lq_sta = priv_sta;
 	struct iwl_link_quality_cmd *table;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_rate_scale_data *window = NULL;
-	struct iwl_rate_scale_data *search_win = NULL;
 	enum mac80211_rate_control_flags mac_flags;
 	u32 tx_rate;
 	struct iwl_scale_tbl_info tbl_type;
-	struct iwl_scale_tbl_info *curr_tbl, *search_tbl;
-	u8 active_index = 0;
+	struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
 	s32 tpt = 0;
 
 	IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -850,30 +764,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 	    info->flags & IEEE80211_TX_CTL_NO_ACK)
 		return;
 
-	/* This packet was aggregated but doesn't carry rate scale info */
+	/* This packet was aggregated but doesn't carry status info */
 	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
 	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
 		return;
 
-	if (info->flags & IEEE80211_TX_STAT_AMPDU)
-		retries = 0;
-	else
-		retries = info->status.rates[0].count - 1;
-
-	if (retries > 15)
-		retries = 15;
-
 	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !lq_sta->ibss_sta_added)
-		goto out;
-
-	table = &lq_sta->lq;
-	active_index = lq_sta->active_tbl;
-
-	curr_tbl = &(lq_sta->lq_info[active_index]);
-	search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
-	window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
-	search_win = (struct iwl_rate_scale_data *)&(search_tbl->win[0]);
+		return;
 
 	/*
 	 * Ignore this Tx frame response if its initial rate doesn't match
@@ -883,6 +781,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 	 * to check "search" mode, or a prior "search" mode after we've moved
 	 * to a new "search" mode (which might become the new "active" mode).
 	 */
+	table = &lq_sta->lq;
 	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
 	rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
 	if (priv->band == IEEE80211_BAND_5GHZ)
@@ -901,7 +800,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 		if (priv->band == IEEE80211_BAND_2GHZ)
 			mac_index += IWL_FIRST_OFDM_RATE;
 	}
-
+	/* Compare this initial rate against the latest LQ command */
 	if ((mac_index < 0) ||
 	    (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
 	    (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
@@ -911,124 +810,106 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 	    (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
 	    (rs_index != mac_index)) {
 		IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
-		/* the last LQ command could failed so the LQ in ucode not
-		 * the same in driver sync up
+		/*
+		 * Since rates mis-match, the last LQ command may have failed.
+		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
+		 * the driver.
 		 */
 		lq_sta->missed_rate_counter++;
 		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
 			lq_sta->missed_rate_counter = 0;
 			iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
 		}
-		goto out;
+		/* Regardless, ignore this status info for outdated rate */
+		return;
+	} else
+		/* Rate did match, so reset the missed_rate_counter */
+		lq_sta->missed_rate_counter = 0;
+
+	/* Figure out if rate scale algorithm is in active or search table */
+	if (table_type_matches(&tbl_type,
+				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
+		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+	} else if (table_type_matches(&tbl_type,
+				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	} else {
+		IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
+		return;
 	}
+	window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
 
-	lq_sta->missed_rate_counter = 0;
-	/* Update frame history window with "failure" for each Tx retry. */
-	while (retries) {
-		/* Look up the rate and other info used for each tx attempt.
-		 * Each tx attempt steps one entry deeper in the rate table. */
-		tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
-		rs_get_tbl_info_from_mcs(tx_rate, priv->band,
-					  &tbl_type, &rs_index);
-
-		/* If type matches "search" table,
-		 * add failure to "search" history */
-		if ((tbl_type.lq_type == search_tbl->lq_type) &&
-		    (tbl_type.ant_type == search_tbl->ant_type) &&
-		    (tbl_type.is_SGI == search_tbl->is_SGI)) {
-			if (search_tbl->expected_tpt)
-				tpt = search_tbl->expected_tpt[rs_index];
-			else
-				tpt = 0;
-			rs_collect_tx_data(search_win, rs_index, tpt, 1, 0);
-
-		/* Else if type matches "current/active" table,
-		 * add failure to "current/active" history */
-		} else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
-			   (tbl_type.ant_type == curr_tbl->ant_type) &&
-			   (tbl_type.is_SGI == curr_tbl->is_SGI)) {
-			if (curr_tbl->expected_tpt)
-				tpt = curr_tbl->expected_tpt[rs_index];
-			else
-				tpt = 0;
-			rs_collect_tx_data(window, rs_index, tpt, 1, 0);
+	/*
+	 * Updating the frame history depends on whether packets were
+	 * aggregated.
+	 *
+	 * For aggregation, all packets were transmitted at the same rate,
+	 * which is the first entry in the rate scale table.
+	 */
+	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+		rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
+				&rs_index);
+		tpt = get_expected_tpt(curr_tbl, rs_index);
+		rs_collect_tx_data(window, rs_index, tpt,
+				   info->status.ampdu_ack_len,
+				   info->status.ampdu_ack_map);
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += info->status.ampdu_ack_map;
+			lq_sta->total_failed += (info->status.ampdu_ack_len -
+					info->status.ampdu_ack_map);
 		}
-
-		/* If not searching for a new mode, increment failed counter
-		 * ... this helps determine when to start searching again */
-		if (lq_sta->stay_in_tbl)
-			lq_sta->total_failed++;
-		--retries;
-		index++;
-
-	}
-
+	} else {
 	/*
-	 * Find (by rate) the history window to update with final Tx attempt;
-	 * if Tx was successful first try, use original rate,
-	 * else look up the rate that was, finally, successful.
+	 * For legacy, update the frame history for each Tx retry.
 	 */
-	tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
-	lq_sta->last_rate_n_flags = tx_rate;
-	rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
-
-	/* Update frame history window with "success" if Tx got ACKed ... */
-	status = !!(info->flags & IEEE80211_TX_STAT_ACK);
-
-	/* If type matches "search" table,
-	 * add final tx status to "search" history */
-	if ((tbl_type.lq_type == search_tbl->lq_type) &&
-	    (tbl_type.ant_type == search_tbl->ant_type) &&
-	    (tbl_type.is_SGI == search_tbl->is_SGI)) {
-		if (search_tbl->expected_tpt)
-			tpt = search_tbl->expected_tpt[rs_index];
-		else
-			tpt = 0;
-		if (info->flags & IEEE80211_TX_STAT_AMPDU)
-			rs_collect_tx_data(search_win, rs_index, tpt,
-					   info->status.ampdu_ack_len,
-					   info->status.ampdu_ack_map);
-		else
-			rs_collect_tx_data(search_win, rs_index, tpt,
-					   1, status);
-	/* Else if type matches "current/active" table,
-	 * add final tx status to "current/active" history */
-	} else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
-		   (tbl_type.ant_type == curr_tbl->ant_type) &&
-		   (tbl_type.is_SGI == curr_tbl->is_SGI)) {
-		if (curr_tbl->expected_tpt)
-			tpt = curr_tbl->expected_tpt[rs_index];
-		else
-			tpt = 0;
-		if (info->flags & IEEE80211_TX_STAT_AMPDU)
-			rs_collect_tx_data(window, rs_index, tpt,
-					   info->status.ampdu_ack_len,
-					   info->status.ampdu_ack_map);
-		else
-			rs_collect_tx_data(window, rs_index, tpt,
-					   1, status);
-	}
+		retries = info->status.rates[0].count - 1;
+		/* HW doesn't send more than 15 retries */
+		retries = min(retries, 15);
+
+		/* The last transmission may have been successful */
+		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		/* Collect data for each rate used during the TX attempts */
+		for (i = 0; i <= retries; ++i) {
+			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+			rs_get_tbl_info_from_mcs(tx_rate, priv->band,
+					&tbl_type, &rs_index);
+			/*
+			 * Only collect stats if retried rate is in the same RS
+			 * table as active/search.
+			 */
+			if (table_type_matches(&tbl_type, curr_tbl))
+				tpt = get_expected_tpt(curr_tbl, rs_index);
+			else if (table_type_matches(&tbl_type, other_tbl))
+				tpt = get_expected_tpt(other_tbl, rs_index);
+			else
+				continue;
 
-	/* If not searching for new mode, increment success/failed counter
-	 * ... these help determine when to start searching again */
-	if (lq_sta->stay_in_tbl) {
-		if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-			lq_sta->total_success += info->status.ampdu_ack_map;
-			lq_sta->total_failed +=
-			     (info->status.ampdu_ack_len - info->status.ampdu_ack_map);
-		} else {
-			if (status)
-				lq_sta->total_success++;
+			/* Each attempt is 1 transmission; only the last may succeed */
+			if (i < retries)
+				rs_collect_tx_data(window, rs_index, tpt, 1,
+						0);
 			else
-				lq_sta->total_failed++;
+				rs_collect_tx_data(window, rs_index, tpt, 1,
+						legacy_success);
+		}
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += legacy_success;
+			lq_sta->total_failed += retries + (1 - legacy_success);
 		}
 	}
+	/* The last TX rate is cached in lq_sta; it's set in if/else above */
+	lq_sta->last_rate_n_flags = tx_rate;
 
 	/* See if there's a better rate or modulation mode to try. */
 	if (sta && sta->supp_rates[sband->band])
 		rs_rate_scale_perform(priv, skb, sta, lq_sta);
-out:
-	return;
 }
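
To summarize the two accounting paths in the rewritten rs_tx_status(): aggregated status contributes ampdu_ack_len attempts and ampdu_ack_map successes in a single rs_collect_tx_data() call, while legacy status contributes one entry per attempt with at most the final attempt marked successful. A hedged, stand-alone restatement (names are illustrative):

/* Illustrative summary of what gets fed into the history window above. */
struct tx_accounting {
	int attempts;
	int successes;
};

static struct tx_accounting summarize_tx_status(int is_ampdu, int ampdu_len,
						int ampdu_acked, int retries,
						int last_acked)
{
	struct tx_accounting a;

	if (is_ampdu) {
		/* all subframes were sent at rs_table[0] */
		a.attempts = ampdu_len;
		a.successes = ampdu_acked;
	} else {
		/* one window entry per attempt; only the last may succeed */
		a.attempts = retries + 1;
		a.successes = last_acked ? 1 : 0;
	}
	return a;
}
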
 
 /*
@@ -1066,43 +947,45 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
 static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 				      struct iwl_scale_tbl_info *tbl)
 {
+	/* Used to choose among HT tables */
+	s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+	/* Check for invalid LQ type */
+	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Legacy rates have only one table */
 	if (is_legacy(tbl->lq_type)) {
-		if (!is_a_band(tbl->lq_type))
-			tbl->expected_tpt = expected_tpt_G;
-		else
-			tbl->expected_tpt = expected_tpt_A;
-	} else if (is_siso(tbl->lq_type)) {
-		if (tbl->is_ht40 && !lq_sta->is_dup)
-			if (tbl->is_SGI)
-				tbl->expected_tpt = expected_tpt_siso40MHzSGI;
-			else
-				tbl->expected_tpt = expected_tpt_siso40MHz;
-		else if (tbl->is_SGI)
-			tbl->expected_tpt = expected_tpt_siso20MHzSGI;
-		else
-			tbl->expected_tpt = expected_tpt_siso20MHz;
-	} else if (is_mimo2(tbl->lq_type)) {
-		if (tbl->is_ht40 && !lq_sta->is_dup)
-			if (tbl->is_SGI)
-				tbl->expected_tpt = expected_tpt_mimo2_40MHzSGI;
-			else
-				tbl->expected_tpt = expected_tpt_mimo2_40MHz;
-		else if (tbl->is_SGI)
-			tbl->expected_tpt = expected_tpt_mimo2_20MHzSGI;
-		else
-			tbl->expected_tpt = expected_tpt_mimo2_20MHz;
-	} else if (is_mimo3(tbl->lq_type)) {
-		if (tbl->is_ht40 && !lq_sta->is_dup)
-			if (tbl->is_SGI)
-				tbl->expected_tpt = expected_tpt_mimo3_40MHzSGI;
-			else
-				tbl->expected_tpt = expected_tpt_mimo3_40MHz;
-		else if (tbl->is_SGI)
-			tbl->expected_tpt = expected_tpt_mimo3_20MHzSGI;
-		else
-			tbl->expected_tpt = expected_tpt_mimo3_20MHz;
-	} else
-		tbl->expected_tpt = expected_tpt_G;
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Choose among many HT tables depending on number of streams
+	 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+	 * status */
+	if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_siso20MHz;
+	else if (is_siso(tbl->lq_type))
+		ht_tbl_pointer = expected_tpt_siso40MHz;
+	else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+	else if (is_mimo2(tbl->lq_type))
+		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+	else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_mimo3_20MHz;
+	else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
+		ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+
+	if (!tbl->is_SGI && !lq_sta->is_agg)		/* Normal */
+		tbl->expected_tpt = ht_tbl_pointer[0];
+	else if (tbl->is_SGI && !lq_sta->is_agg)	/* SGI */
+		tbl->expected_tpt = ht_tbl_pointer[1];
+	else if (!tbl->is_SGI && lq_sta->is_agg)	/* AGG */
+		tbl->expected_tpt = ht_tbl_pointer[2];
+	else						/* AGG+SGI */
+		tbl->expected_tpt = ht_tbl_pointer[3];
 }
 
 /*
@@ -2077,6 +1960,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
 	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
 
 	tid = rs_tl_add_packet(lq_sta, hdr);
+	if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+		tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+		if (tid_data->agg.state == IWL_AGG_OFF)
+			lq_sta->is_agg = 0;
+		else
+			lq_sta->is_agg = 1;
+	} else
+		lq_sta->is_agg = 0;
 
 	/*
 	 * Select rate-scale / modulation-mode table to work with in
@@ -2177,10 +2068,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
 
 		goto out;
 	}
-
 	/* Else we have enough samples; calculate estimate of
 	 * actual average throughput */
 
+	/* Sanity-check TPT calculations */
 	BUG_ON(window->average_tpt != ((window->success_ratio *
 			tbl->expected_tpt[index] + 64) / 128));
 
@@ -2584,22 +2475,13 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
 			  gfp_t gfp)
 {
 	struct iwl_lq_sta *lq_sta;
+	struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
 	struct iwl_priv *priv;
-	int i, j;
 
 	priv = (struct iwl_priv *)priv_rate;
 	IWL_DEBUG_RATE(priv, "create station rate scale window\n");
 
-	lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp);
-
-	if (lq_sta == NULL)
-		return NULL;
-	lq_sta->lq.sta_id = 0xff;
-
-
-	for (j = 0; j < LQ_SIZE; j++)
-		for (i = 0; i < IWL_RATE_COUNT; i++)
-			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+	lq_sta = &sta_priv->lq_sta;
 
 	return lq_sta;
 }
@@ -2613,6 +2495,12 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct iwl_lq_sta *lq_sta = priv_sta;
 
+	lq_sta->lq.sta_id = 0xff;
+
+	for (j = 0; j < LQ_SIZE; j++)
+		for (i = 0; i < IWL_RATE_COUNT; i++)
+			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
 	lq_sta->flush_timer = 0;
 	lq_sta->supp_rates = sta->supp_rates[sband->band];
 	for (j = 0; j < LQ_SIZE; j++)
@@ -2690,6 +2578,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
 	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
 	if (sband->band == IEEE80211_BAND_5GHZ)
 		lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+	lq_sta->is_agg = 0;
 
 	rs_initialize_lq(priv, conf, sta, lq_sta);
 }
@@ -2808,7 +2697,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
 		repeat_rate--;
 	}
 
-	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX;
+	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
 	lq_cmd->agg_params.agg_time_limit =
 		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
@@ -2827,11 +2716,9 @@ static void rs_free(void *priv_rate)
 static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
 			void *priv_sta)
 {
-	struct iwl_lq_sta *lq_sta = priv_sta;
 	struct iwl_priv *priv __maybe_unused = priv_r;
 
 	IWL_DEBUG_RATE(priv, "enter\n");
-	kfree(lq_sta);
 	IWL_DEBUG_RATE(priv, "leave\n");
 }
 
@@ -2942,8 +2829,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 		   ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
 		   desc += sprintf(buff+desc, " %s",
 		   (tbl->is_ht40) ? "40MHz" : "20MHz");
-		   desc += sprintf(buff+desc, " %s %s\n", (tbl->is_SGI) ? "SGI" : "",
-		   (lq_sta->is_green) ? "GF enabled" : "");
+		   desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
+		   (lq_sta->is_green) ? "GF enabled" : "",
+		   (lq_sta->is_agg) ? "AGG on" : "");
 	}
 	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
 		lq_sta->last_rate_n_flags);
@@ -3076,16 +2964,16 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
 {
 	struct iwl_lq_sta *lq_sta = priv_sta;
 	lq_sta->rs_sta_dbgfs_scale_table_file =
-		debugfs_create_file("rate_scale_table", 0600, dir,
+		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
 				lq_sta, &rs_sta_dbgfs_scale_table_ops);
 	lq_sta->rs_sta_dbgfs_stats_table_file =
-		debugfs_create_file("rate_stats_table", 0600, dir,
+		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
 			lq_sta, &rs_sta_dbgfs_stats_table_ops);
 	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
-		debugfs_create_file("rate_scale_data", 0600, dir,
+		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
 			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
 	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
-		debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
 		&lq_sta->tx_agg_tid_en);
 
 }
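
For reference, the symbolic modes above map to the octal permissions previously used: rate_scale_table and tx_agg_tid_enable stay owner read/write, while the two statistics files become owner read-only (the macro names below are illustrative, not from the driver):

#define RS_DBGFS_RW	(S_IRUSR | S_IWUSR)	/* 0600: owner read/write */
#define RS_DBGFS_RO	S_IRUSR			/* 0400: owner read-only  */
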
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9fac530cfb7e..affc0c5a2f2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -54,6 +54,7 @@ struct iwl3945_rate_info {
 	u8 prev_table_rs;	/* prev in rate table cmd */
 };
 
+
 /*
  * These serve as indexes into
  * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -335,6 +336,106 @@ struct iwl_rate_mcs_info {
 	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
 };
 
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+	u64 data;		/* bitmap of successful frames */
+	s32 success_counter;	/* number of frames successful */
+	s32 success_ratio;	/* per-cent * 128  */
+	s32 counter;		/* number of frames attempted */
+	s32 average_tpt;	/* success ratio * expected throughput */
+	unsigned long stamp;
+};
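
The derived fields follow from the comments above and from the sanity check in iwl-agn-rs.c (average_tpt == (success_ratio * expected_tpt + 64) / 128). A sketch, with the success_ratio formula inferred from the "per-cent * 128" note and therefore an assumption:

static inline s32 model_success_ratio(s32 success_counter, s32 counter)
{
	if (counter <= 0)
		return 0;	/* guard; the driver's empty-window handling is not shown here */
	return 128 * (100 * success_counter) / counter;
}

static inline s32 model_average_tpt(s32 success_ratio, s32 expected_tpt)
{
	/* matches the BUG_ON check in rs_rate_scale_perform() */
	return (success_ratio * expected_tpt + 64) / 128;
}
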
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+	enum iwl_table_type lq_type;
+	u8 ant_type;
+	u8 is_SGI;	/* 1 = short guard interval */
+	u8 is_ht40;	/* 1 = 40 MHz channel width */
+	u8 is_dup;	/* 1 = duplicated data streams */
+	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;	/* maximum number of tables we can search */
+	s32 *expected_tpt;	/* throughput metrics; expected_tpt_legacy, etc. */
+	u32 current_rate;  /* rate_n_flags, uCode API format */
+	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+	unsigned long time_stamp;	/* age of the oldest statistics */
+	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+						 * slice */
+	u32 total;			/* total num of packets during the
+					 * last TID_MAX_TIME_DIFF */
+	u8 queue_count;			/* number of queues that have
+					 * been used since the last cleanup */
+	u8 head;			/* start of the circular buffer */
+};
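
struct iwl_traffic_load is a time-sliced circular counter; its update code (rs_tl_add_packet() and friends) is not part of this hunk, so the following is only a generic sketch of how such a structure is typically filled, with slice length and names as assumptions:

#define TL_SLICES 8	/* stand-in for TID_QUEUE_MAX_SIZE */

struct tl_model {
	unsigned long time_stamp;	/* time of the oldest slice */
	u32 packet_count[TL_SLICES];
	u32 total;
	u8 head;			/* index of the oldest slice */
};

static void tl_model_add_packet(struct tl_model *tl, unsigned long now,
				unsigned long slice_len)
{
	unsigned long idx = (now - tl->time_stamp) / slice_len;

	if (idx < TL_SLICES) {
		tl->packet_count[(tl->head + idx) % TL_SLICES]++;
		tl->total++;
	}
	/* advancing the window when idx >= TL_SLICES is omitted here */
}
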
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+	u8 active_tbl;		/* index of active table, range 0-1 */
+	u8 enable_counter;	/* indicates HT mode */
+	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
+	u8 search_better_tbl;	/* 1: currently trying alternate mode */
+	s32 last_tpt;
+
+	/* The following determine when to search for a new mode */
+	u32 table_count_limit;
+	u32 max_failure_limit;	/* # failed frames before new search */
+	u32 max_success_limit;	/* # successful frames before new search */
+	u32 table_count;
+	u32 total_failed;	/* total failed frames, any/all rates */
+	u32 total_success;	/* total successful frames, any/all rates */
+	u64 flush_timer;	/* time staying in mode before new search */
+
+	u8 action_counter;	/* # mode-switch actions tried */
+	u8 is_green;
+	u8 is_dup;
+	enum ieee80211_band band;
+	u8 ibss_sta_added;
+
+	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+	u32 supp_rates;
+	u16 active_legacy_rate;
+	u16 active_siso_rate;
+	u16 active_mimo2_rate;
+	u16 active_mimo3_rate;
+	u16 active_rate_basic;
+	s8 max_rate_idx;     /* Max rate set by user */
+	u8 missed_rate_counter;
+
+	struct iwl_link_quality_cmd lq;
+	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+	u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_scale_table_file;
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+	u32 dbg_fixed_rate;
+#endif
+	struct iwl_priv *drv;
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+	/* packets destined for this STA are aggregated */
+	u8 is_agg;
+};
+
 static inline u8 num_of_ant(u8 mask)
 {
 	return  !!((mask) & ANT_A) +
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 921dc4a26fe2..b8377efb3ba7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -123,6 +123,17 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 		return -EINVAL;
 	}
 
+	/*
+	 * A commit_rxon request was received; abort any previous channel
+	 * switch that is still in progress.
+	 */
+	if (priv->switch_rxon.switch_in_progress &&
+	    (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
+		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+		      le16_to_cpu(priv->switch_rxon.channel));
+		priv->switch_rxon.switch_in_progress = false;
+	}
+
 	/* If we don't need to send a full RXON, we can use
 	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
 	 * and other flags for the current radio configuration. */
@@ -134,6 +145,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 		}
 
 		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
+		iwl_print_rx_config_cmd(priv);
 		return 0;
 	}
 
@@ -191,11 +203,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 	priv->start_calib = 0;
 
 	/* Add the broadcast address so we can send broadcast frames */
-	if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
-						IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
-		return -EIO;
-	}
+	iwl_add_bcast_station(priv);
 
 	/* If we have set the ASSOC_MSK and we are in BSS mode then
 	 * add the IWL_AP_ID to the station rate table */
@@ -233,6 +241,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 		}
 		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
 	}
+	iwl_print_rx_config_cmd(priv);
 
 	iwl_init_sensitivity(priv);
 
@@ -302,7 +311,7 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
 	list_add(&frame->list, &priv->free_frames);
 }
 
-static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
+static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
 					  struct ieee80211_hdr *hdr,
 					  int left)
 {
@@ -319,34 +328,74 @@ static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
 	return priv->ibss_beacon->len;
 }
 
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void iwl_set_beacon_tim(struct iwl_priv *priv,
+		struct iwl_tx_beacon_cmd *tx_beacon_cmd,
+		u8 *beacon, u32 frame_size)
+{
+	u16 tim_idx;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+	/*
+	 * The index is relative to frame start but we start looking at the
+	 * variable-length part of the beacon.
+	 */
+	tim_idx = mgmt->u.beacon.variable - beacon;
+
+	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+	while ((tim_idx < (frame_size - 2)) &&
+			(beacon[tim_idx] != WLAN_EID_TIM))
+		tim_idx += beacon[tim_idx+1] + 2;
+
+	/* If TIM field was found, set variables */
+	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
+	} else
+		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
+}
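
iwl_set_beacon_tim() relies on the standard 802.11 information-element layout: a 1-byte element ID, a 1-byte length, then the payload. A small generic helper showing that walk (for reference only, not part of the patch):

/* Returns the offset of the first element with the given ID, or -1. */
static int find_ie_offset(const u8 *ies, u32 len, u8 eid)
{
	u32 pos = 0;

	while (pos + 2 <= len) {
		if (ies[pos] == eid)
			return pos;
		pos += ies[pos + 1] + 2;	/* skip ID, length, payload */
	}
	return -1;
}
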
+
 static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
-				       struct iwl_frame *frame, u8 rate)
+				       struct iwl_frame *frame)
 {
 	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
-	unsigned int frame_size;
+	u32 frame_size;
+	u32 rate_flags;
+	u32 rate;
+	/*
+	 * We have to set up the TX command, the TX Beacon command, and the
+	 * beacon contents.
+	 */
 
+	/* Initialize memory */
 	tx_beacon_cmd = &frame->u.beacon;
 	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
 
-	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
-	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
+	/* Set up TX beacon contents */
 	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
 				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+		return 0;
 
-	BUG_ON(frame_size > MAX_MPDU_SIZE);
+	/* Set up TX command fields */
 	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
+	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
 
-	if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
-		tx_beacon_cmd->tx.rate_n_flags =
-			iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
-	else
-		tx_beacon_cmd->tx.rate_n_flags =
-			iwl_hw_set_rate_n_flags(rate, 0);
+	/* Set up TX beacon command fields */
+	iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
+			frame_size);
 
-	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
-				     TX_CMD_FLG_TSF_MSK |
-				     TX_CMD_FLG_STA_RATE_MSK;
+	/* Set up packet rate and flags */
+	rate = iwl_rate_get_lowest_plcp(priv);
+	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+	tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
+			rate_flags);
 
 	return sizeof(*tx_beacon_cmd) + frame_size;
 }
@@ -355,19 +404,20 @@ static int iwl_send_beacon_cmd(struct iwl_priv *priv)
 	struct iwl_frame *frame;
 	unsigned int frame_size;
 	int rc;
-	u8 rate;
 
 	frame = iwl_get_free_frame(priv);
-
 	if (!frame) {
 		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
 			  "command.\n");
 		return -ENOMEM;
 	}
 
-	rate = iwl_rate_get_lowest_plcp(priv);
-
-	frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
+	frame_size = iwl_hw_get_beacon_cmd(priv, frame);
+	if (!frame_size) {
+		IWL_ERR(priv, "Error configuring the beacon command\n");
+		iwl_free_frame(priv, frame);
+		return -EINVAL;
+	}
 
 	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
 			      &frame->u.cmd[0]);
@@ -525,7 +575,7 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 static void iwl_rx_reply_alive(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_alive_resp *palive;
 	struct delayed_work *pwork;
 
@@ -604,14 +654,14 @@ static void iwl_bg_statistics_periodic(unsigned long data)
 	if (!iwl_is_ready_rf(priv))
 		return;
 
-	iwl_send_statistics_request(priv, CMD_ASYNC);
+	iwl_send_statistics_request(priv, CMD_ASYNC, false);
 }
 
 static void iwl_rx_beacon_notif(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl4965_beacon_notif *beacon =
 		(struct iwl4965_beacon_notif *)pkt->u.raw;
 	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -635,7 +685,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
 static void iwl_rx_card_state_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 	unsigned long status = priv->status;
 
@@ -721,7 +771,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
 	 * statistics request from the host as well as for the periodic
 	 * statistics notifications (after received beacons) from the uCode.
 	 */
-	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics;
+	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
 	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
 
 	iwl_setup_spectrum_handlers(priv);
@@ -770,7 +820,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
 		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
 
 	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - priv->rxq.write_actual;
+	total_empty = r - rxq->write_actual;
 	if (total_empty < 0)
 		total_empty += RX_QUEUE_SIZE;
 
@@ -787,10 +837,13 @@ void iwl_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				 priv->hw_params.rx_buf_size + 256,
-				 PCI_DMA_FROMDEVICE);
-		pkt = (struct iwl_rx_packet *)rxb->skb->data;
+		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << priv->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
+
+		trace_iwlwifi_dev_rx(priv, pkt,
+			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -812,8 +865,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
 		if (priv->rx_handlers[pkt->hdr.cmd]) {
 			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
 				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 		} else {
 			/* No handling needed */
 			IWL_DEBUG_RX(priv,
@@ -822,35 +875,45 @@ void iwl_rx_handle(struct iwl_priv *priv)
 				pkt->hdr.cmd);
 		}
 
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt), because some rx_handler might have
+		 * already taken or freed the page.
+		 */
+
 		if (reclaim) {
-			/* Invoke any callbacks, transfer the skb to caller, and
-			 * fire off the (possibly) blocking iwl_send_cmd()
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking iwl_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxb && rxb->skb)
+			if (rxb->page)
 				iwl_tx_cmd_complete(priv, rxb);
 			else
 				IWL_WARN(priv, "Claim null rxb?\n");
 		}
 
-		/* For now we just don't re-use anything.  We can tweak this
-		 * later to try and re-use notification packets and SKBs that
-		 * fail to Rx correctly */
-		if (rxb->skb != NULL) {
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb_any(rxb->skb);
-			rxb->skb = NULL;
-		}
-
+		/* Reuse the page if possible. For notification packets and
+		 * buffers that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
 		spin_lock_irqsave(&rxq->lock, flags);
-		list_add_tail(&rxb->list, &priv->rxq.rx_used);
+		if (rxb->page != NULL) {
+			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+				0, PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
+
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
 		 * restock the Rx queue so ucode won't assert. */
 		if (fill_rx) {
 			count++;
 			if (count >= 8) {
-				priv->rxq.read = i;
+				rxq->read = i;
 				iwl_rx_replenish_now(priv);
 				count = 0;
 			}
@@ -858,7 +921,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
 	}
 
 	/* Backtrack one entry */
-	priv->rxq.read = i;
+	rxq->read = i;
 	if (fill_rx)
 		iwl_rx_replenish_now(priv);
 	else
@@ -878,6 +941,7 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 	u32 inta, handled = 0;
 	u32 inta_fh;
 	unsigned long flags;
+	u32 i;
 #ifdef CONFIG_IWLWIFI_DEBUG
 	u32 inta_mask;
 #endif
@@ -905,6 +969,8 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 	}
 #endif
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
 	 * atomic, make sure that inta covers all the interrupts that
 	 * we've discovered, even if FH interrupt came in just after
@@ -926,8 +992,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -995,19 +1059,17 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 		handled |= CSR_INT_BIT_SW_ERR;
 	}
 
-	/* uCode wakes up after power-down sleep */
+	/*
+	 * uCode wakes up after power-down sleep.
+	 * Tell device about any new tx or host commands enqueued,
+	 * and about any Rx buffers made available while asleep.
+	 */
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
 		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
-		iwl_txq_update_write_ptr(priv, &priv->txq[0]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[1]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[2]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[3]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[4]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[5]);
-
+		for (i = 0; i < priv->hw_params.max_txq_num; i++)
+			iwl_txq_update_write_ptr(priv, &priv->txq[i]);
 		priv->isr_stats.wakeup++;
-
 		handled |= CSR_INT_BIT_WAKEUP;
 	}
 
@@ -1020,11 +1082,12 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
 	}
 
+	/* This "Tx" DMA channel is used only for loading uCode */
 	if (inta & CSR_INT_BIT_FH_TX) {
-		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
+		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
 		priv->isr_stats.tx++;
 		handled |= CSR_INT_BIT_FH_TX;
-		/* FH finished to write, send event */
+		/* Wake up uCode load routine, now that load is complete */
 		priv->ucode_write_complete = 1;
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
@@ -1054,7 +1117,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
 	}
 #endif
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /* tasklet for iwlagn interrupt */
@@ -1063,6 +1125,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 	u32 inta = 0;
 	u32 handled = 0;
 	unsigned long flags;
+	u32 i;
 #ifdef CONFIG_IWLWIFI_DEBUG
 	u32 inta_mask;
 #endif
@@ -1084,6 +1147,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 				inta, inta_mask);
 	}
 #endif
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* saved interrupt in inta variable now we can reset priv->inta */
 	priv->inta = 0;
 
@@ -1099,8 +1165,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1172,12 +1236,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
 		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
-		iwl_txq_update_write_ptr(priv, &priv->txq[0]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[1]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[2]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[3]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[4]);
-		iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+		for (i = 0; i < priv->hw_params.max_txq_num; i++)
+			iwl_txq_update_write_ptr(priv, &priv->txq[i]);
 
 		priv->isr_stats.wakeup++;
 
@@ -1206,26 +1266,36 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 		 * 3- update RX shared data to indicate last write index.
 		 * 4- send interrupt.
 		 * This could lead to RX race, driver could receive RX interrupt
-		 * but the shared data changes does not reflect this.
-		 * this could lead to RX race, RX periodic will solve this race
+		 * but the shared data changes do not reflect this;
+		 * periodic interrupt will detect any dangling Rx activity.
 		 */
-		iwl_write32(priv, CSR_INT_PERIODIC_REG,
+
+		/* Disable periodic interrupt; we use it as just a one-shot. */
+		iwl_write8(priv, CSR_INT_PERIODIC_REG,
 			    CSR_INT_PERIODIC_DIS);
 		iwl_rx_handle(priv);
-		/* Only set RX periodic if real RX is received. */
+
+		/*
+		 * Enable periodic interrupt in 8 msec only if we received
+		 * real RX interrupt (instead of just periodic int), to catch
+		 * any dangling Rx interrupt.  If it was just the periodic
+		 * interrupt, there was no dangling Rx activity, and no need
+		 * to extend the periodic interrupt; one-shot is enough.
+		 */
 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
-			iwl_write32(priv, CSR_INT_PERIODIC_REG,
+			iwl_write8(priv, CSR_INT_PERIODIC_REG,
 				    CSR_INT_PERIODIC_ENA);
 
 		priv->isr_stats.rx++;
 	}
 
+	/* This "Tx" DMA channel is used only for loading uCode */
 	if (inta & CSR_INT_BIT_FH_TX) {
 		iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK);
-		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
+		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
 		priv->isr_stats.tx++;
 		handled |= CSR_INT_BIT_FH_TX;
-		/* FH finished to write, send event */
+		/* Wake up uCode load routine, now that load is complete */
 		priv->ucode_write_complete = 1;
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
@@ -1240,14 +1310,10 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 			 inta & ~priv->inta_mask);
 	}
 
-
 	/* Re-enable all interrupts */
 	/* only re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 }
 
 
@@ -1367,6 +1433,14 @@ static int iwl_read_ucode(struct iwl_priv *priv)
 	       IWL_UCODE_API(priv->ucode_ver),
 	       IWL_UCODE_SERIAL(priv->ucode_ver));
 
+	snprintf(priv->hw->wiphy->fw_version,
+		 sizeof(priv->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u",
+		 IWL_UCODE_MAJOR(priv->ucode_ver),
+		 IWL_UCODE_MINOR(priv->ucode_ver),
+		 IWL_UCODE_API(priv->ucode_ver),
+		 IWL_UCODE_SERIAL(priv->ucode_ver));
+
 	if (build)
 		IWL_DEBUG_INFO(priv, "Build %u\n", build);
 
@@ -1531,7 +1605,6 @@ static int iwl_read_ucode(struct iwl_priv *priv)
 	return ret;
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
 static const char *desc_lookup_text[] = {
 	"OK",
 	"FAIL",
@@ -1589,7 +1662,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
 		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
 
 	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
-		IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
+		IWL_ERR(priv,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
 		return;
 	}
 
@@ -1611,6 +1686,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
 	line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
 	time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
 
+	trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
+				      blink1, blink2, ilink1, ilink2);
+
 	IWL_ERR(priv, "Desc                               Time       "
 		"data1      data2      line\n");
 	IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
@@ -1635,6 +1713,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
 	u32 ptr;        /* SRAM byte address of log data */
 	u32 ev, time, data; /* event log data */
+	unsigned long reg_flags;
 
 	if (num_events == 0)
 		return;
@@ -1650,26 +1729,72 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 
 	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
 
+	/* Make sure device is powered up for SRAM reads */
+	spin_lock_irqsave(&priv->reg_lock, reg_flags);
+	iwl_grab_nic_access(priv);
+
+	/* Set starting address; reads will auto-increment */
+	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+	rmb();
+
 	/* "time" is actually "data" for mode 0 (no timestamp).
 	* place event id # at far right for easier visual parsing. */
 	for (i = 0; i < num_events; i++) {
-		ev = iwl_read_targ_mem(priv, ptr);
-		ptr += sizeof(u32);
-		time = iwl_read_targ_mem(priv, ptr);
-		ptr += sizeof(u32);
+		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 		if (mode == 0) {
 			/* data, ev */
+			trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
 			IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
 		} else {
-			data = iwl_read_targ_mem(priv, ptr);
-			ptr += sizeof(u32);
+			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 			IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
 					time, data, ev);
+			trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
 		}
 	}
+
+	/* Allow device to power down */
+	iwl_release_nic_access(priv);
+	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+/**
+ * iwl_print_last_event_logs - Dump the newest event log entries to syslog
+ */
+static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+				      u32 num_wraps, u32 next_entry,
+				      u32 size, u32 mode)
+{
+	/*
+	 * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
+	 * i.e. the entries just before the next one that uCode would fill.
+	 */
+	if (num_wraps) {
+		if (next_entry < size) {
+			iwl_print_event_log(priv,
+					capacity - (size - next_entry),
+					size - next_entry, mode);
+			iwl_print_event_log(priv, 0,
+				    next_entry, mode);
+		} else
+			iwl_print_event_log(priv, next_entry - size,
+				    size, mode);
+	} else {
+		if (next_entry < size)
+			iwl_print_event_log(priv, 0, next_entry, mode);
+		else
+			iwl_print_event_log(priv, next_entry - size,
+					    size, mode);
+	}
 }
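
A worked example of the wrap-around arithmetic above, with assumed values capacity = 512, next_entry = 5, size = 20 and num_wraps > 0 (so next_entry < size):

iwl_print_event_log(priv, 512 - (20 - 5), 20 - 5, mode);	/* entries 497..511 */
iwl_print_event_log(priv, 0, 5, mode);				/* entries 0..4 */
/* together: the 20 newest entries of the circular log */
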
 
-void iwl_dump_nic_event_log(struct iwl_priv *priv)
+/* For sanity check only.  Actual size is determined by uCode, typ. 512 */
+#define MAX_EVENT_LOG_SIZE (512)
+
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
 {
 	u32 base;       /* SRAM byte address of event log header */
 	u32 capacity;   /* event log capacity in # entries */
@@ -1684,7 +1809,9 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
 		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
 
 	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
-		IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
+		IWL_ERR(priv,
+			"Invalid event log pointer 0x%08X for %s uCode\n",
+			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
 		return;
 	}
 
@@ -1694,6 +1821,18 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
 	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
 	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
 
+	if (capacity > MAX_EVENT_LOG_SIZE) {
+		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
+			capacity, MAX_EVENT_LOG_SIZE);
+		capacity = MAX_EVENT_LOG_SIZE;
+	}
+
+	if (next_entry > MAX_EVENT_LOG_SIZE) {
+		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
+			next_entry, MAX_EVENT_LOG_SIZE);
+		next_entry = MAX_EVENT_LOG_SIZE;
+	}
+
 	size = num_wraps ? capacity : next_entry;
 
 	/* bail out if nothing in log */
@@ -1702,19 +1841,37 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
 		return;
 	}
 
-	IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
-			size, num_wraps);
-
-	/* if uCode has wrapped back to top of log, start at the oldest entry,
-	 * i.e the next one that uCode would fill. */
-	if (num_wraps)
-		iwl_print_event_log(priv, next_entry,
-					capacity - next_entry, mode);
-	/* (then/else) start at top of log */
-	iwl_print_event_log(priv, 0, next_entry, mode);
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+		size);
 
-}
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+		/*
+		 * if uCode has wrapped back to top of log,
+		 * start at the oldest entry,
+		 * i.e the next one that uCode would fill.
+		 */
+		if (num_wraps)
+			iwl_print_event_log(priv, next_entry,
+					    capacity - next_entry, mode);
+		/* (then/else) start at top of log */
+		iwl_print_event_log(priv, 0, next_entry, mode);
+	} else
+		iwl_print_last_event_logs(priv, capacity, num_wraps,
+					next_entry, size, mode);
+#else
+	iwl_print_last_event_logs(priv, capacity, num_wraps,
+				next_entry, size, mode);
 #endif
+}
 
 /**
  * iwl_alive_start - called after REPLY_ALIVE notification received
@@ -1763,6 +1920,10 @@ static void iwl_alive_start(struct iwl_priv *priv)
 	priv->active_rate = priv->rates_mask;
 	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
 
+	/* Configure Tx antenna selection based on H/W config */
+	if (priv->cfg->ops->hcmd->set_tx_ant)
+		priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
+
 	if (iwl_is_associated(priv)) {
 		struct iwl_rxon_cmd *active_rxon =
 				(struct iwl_rxon_cmd *)&priv->active_rxon;
@@ -1790,7 +1951,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
 	/* At this point, the NIC is initialized and operational */
 	iwl_rf_kill_ct_config(priv);
 
-	iwl_leds_register(priv);
+	iwl_leds_init(priv);
 
 	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
 	set_bit(STATUS_READY, &priv->status);
@@ -1828,8 +1989,6 @@ static void __iwl_down(struct iwl_priv *priv)
 	if (!exit_pending)
 		set_bit(STATUS_EXIT_PENDING, &priv->status);
 
-	iwl_leds_unregister(priv);
-
 	iwl_clear_stations_table(priv);
 
 	/* Unblock any waiting calls */
@@ -1877,24 +2036,20 @@ static void __iwl_down(struct iwl_priv *priv)
 
 	/* device going down, Stop using ICT table */
 	iwl_disable_ict(priv);
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_clear_bit(priv, CSR_GP_CNTRL,
-			 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	iwl_txq_ctx_stop(priv);
 	iwl_rxq_stop(priv);
 
-	iwl_write_prph(priv, APMG_CLK_DIS_REG,
-				APMG_CLK_VAL_DMA_CLK_RQT);
-
+	/* Power-down device's busmaster DMA clocks */
+	iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
 	udelay(5);
 
-	/* FIXME: apm_ops.suspend(priv) */
-	if (exit_pending)
-		priv->cfg->ops->lib->apm_ops.stop(priv);
-	else
-		priv->cfg->ops->lib->apm_ops.reset(priv);
+	/* Make sure (redundant) we've released our request to stay awake */
+	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/* Stop the device, and put it in low power state */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
  exit:
 	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
 
@@ -2281,6 +2436,67 @@ void iwl_post_associate(struct iwl_priv *priv)
 
 #define UCODE_READY_TIMEOUT	(4 * HZ)
 
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int iwl_setup_mac(struct iwl_priv *priv)
+{
+	int ret;
+	struct ieee80211_hw *hw = priv->hw;
+	hw->rate_control_algorithm = "iwl-agn-rs";
+
+	/* Tell mac80211 our characteristics */
+	hw->flags = IEEE80211_HW_SIGNAL_DBM |
+		    IEEE80211_HW_NOISE_DBM |
+		    IEEE80211_HW_AMPDU_AGGREGATION |
+		    IEEE80211_HW_SPECTRUM_MGMT;
+
+	if (!priv->cfg->broken_powersave)
+		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+	hw->sta_data_size = sizeof(struct iwl_station_priv);
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+			    WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+	/*
+	 * For now, disable PS by default because it affects
+	 * RX performance significantly.
+	 */
+	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+	/* we create the 802.11 header and a zero-length SSID element */
+	hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+			&priv->bands[IEEE80211_BAND_2GHZ];
+	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+			&priv->bands[IEEE80211_BAND_5GHZ];
+
+	ret = ieee80211_register_hw(priv->hw);
+	if (ret) {
+		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	priv->mac80211_registered = 1;
+
+	return 0;
+}
+
+
 static int iwl_mac_start(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = hw->priv;
@@ -2328,6 +2544,8 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
 		}
 	}
 
+	iwl_led_start(priv);
+
 out:
 	priv->is_open = 1;
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2404,6 +2622,10 @@ void iwl_config_ap(struct iwl_priv *priv)
 			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
 					"Attempting to continue.\n");
 
+		/* AP has all antennas */
+		priv->chain_noise_data.active_chains =
+			priv->hw_params.valid_rx_ant;
+		iwl_set_rxon_ht(priv, &priv->current_ht_config);
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
 			priv->cfg->ops->hcmd->set_rxon_chain(priv);
 
@@ -2432,10 +2654,11 @@ void iwl_config_ap(struct iwl_priv *priv)
 		/* restore RXON assoc */
 		priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
 		iwlcore_commit_rxon(priv);
+		iwl_reset_qos(priv);
 		spin_lock_irqsave(&priv->lock, flags);
 		iwl_activate_qos(priv, 1);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
+		iwl_add_bcast_station(priv);
 	}
 	iwl_send_beacon_cmd(priv);
 
@@ -2527,6 +2750,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 }
 
 static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
 			     enum ieee80211_ampdu_mlme_action action,
 			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
@@ -2580,6 +2804,45 @@ static int iwl_mac_get_stats(struct ieee80211_hw *hw,
 	return 0;
 }
 
+static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       enum sta_notify_cmd cmd,
+			       struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	int sta_id;
+
+	/*
+	 * TODO: We really should use this callback to
+	 *	 actually maintain the station table in
+	 *	 the device.
+	 */
+
+	switch (cmd) {
+	case STA_NOTIFY_ADD:
+		atomic_set(&sta_priv->pending_frames, 0);
+		if (vif->type == NL80211_IFTYPE_AP)
+			sta_priv->client = true;
+		break;
+	case STA_NOTIFY_SLEEP:
+		WARN_ON(!sta_priv->client);
+		sta_priv->asleep = true;
+		if (atomic_read(&sta_priv->pending_frames) > 0)
+			ieee80211_sta_block_awake(hw, sta, true);
+		break;
+	case STA_NOTIFY_AWAKE:
+		WARN_ON(!sta_priv->client);
+		sta_priv->asleep = false;
+		sta_id = iwl_find_station(priv, sta->addr);
+		if (sta_id != IWL_INVALID_STATION)
+			iwl_sta_modify_ps_wake(priv, sta_id);
+		break;
+	default:
+		break;
+	}
+}
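
/*
 * Illustrative sketch, not part of this patch: a hypothetical TX-completion
 * counterpart to the STA_NOTIFY_SLEEP handling above.  The helper name is
 * invented; pending_frames and asleep are the fields set up in
 * iwl_mac_sta_notify(), and ieee80211_sta_block_awake() is the mac80211
 * call used there, here with the opposite sense once the last pending
 * frame has completed.
 */
static void ex_sta_tx_done(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
{
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;

	if (atomic_dec_return(&sta_priv->pending_frames) == 0 &&
	    sta_priv->asleep)
		ieee80211_sta_block_awake(hw, sta, false);
}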
+
 /*****************************************************************************
  *
  * sysfs attributes
@@ -2774,7 +3037,7 @@ static ssize_t show_statistics(struct device *d,
 		return -EAGAIN;
 
 	mutex_lock(&priv->mutex);
-	rc = iwl_send_statistics_request(priv, 0);
+	rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
 	if (rc) {
@@ -2799,6 +3062,40 @@ static ssize_t show_statistics(struct device *d,
 
 static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
 
+static ssize_t show_rts_ht_protection(struct device *d,
+			     struct device_attribute *attr, char *buf)
+{
+	struct iwl_priv *priv = dev_get_drvdata(d);
+
+	return sprintf(buf, "%s\n",
+		priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
+}
+
+static ssize_t store_rts_ht_protection(struct device *d,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct iwl_priv *priv = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret)
+		IWL_INFO(priv, "Input is not in decimal form.\n");
+	else {
+		if (!iwl_is_associated(priv))
+			priv->cfg->use_rts_for_ht = val ? true : false;
+		else
+			IWL_ERR(priv, "Sta associated with AP - "
+				"Change protection mechanism is not allowed\n");
+		ret = count;
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
+			show_rts_ht_protection, store_rts_ht_protection);
+
 
 /*****************************************************************************
  *
@@ -2849,12 +3146,103 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
 	del_timer_sync(&priv->statistics_periodic);
 }
 
+static void iwl_init_hw_rates(struct iwl_priv *priv,
+			      struct ieee80211_rate *rates)
+{
+	int i;
+
+	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
+		rates[i].bitrate = iwl_rates[i].ieee * 5;
+		rates[i].hw_value = i; /* Rate scaling will work on indexes */
+		rates[i].hw_value_short = i;
+		rates[i].flags = 0;
+		if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
+			/*
+			 * If CCK != 1M then set short preamble rate flag.
+			 */
+			rates[i].flags |=
+				(iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
+					0 : IEEE80211_RATE_SHORT_PREAMBLE;
+		}
+	}
+}
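
/*
 * Illustrative standalone sketch, not part of this patch: the "* 5" above
 * converts the 802.11 rate field (units of 500 kbps, e.g. 2 == 1 Mbps,
 * 22 == 11 Mbps) into mac80211's bitrate units of 100 kbps.
 */
#include <stdio.h>

int main(void)
{
	/* a few example values mirroring iwl_rates[].ieee */
	static const int ieee_vals[] = { 2, 4, 11, 22, 12, 108 };
	int i;

	for (i = 0; i < 6; i++)
		printf("ieee %3d -> bitrate %4d (%d.%d Mbps)\n",
		       ieee_vals[i], ieee_vals[i] * 5,
		       ieee_vals[i] * 5 / 10, ieee_vals[i] * 5 % 10);
	return 0;
}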
+
+static int iwl_init_drv(struct iwl_priv *priv)
+{
+	int ret;
+
+	priv->ibss_beacon = NULL;
+
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->sta_lock);
+	spin_lock_init(&priv->hcmd_lock);
+
+	INIT_LIST_HEAD(&priv->free_frames);
+
+	mutex_init(&priv->mutex);
+
+	/* Clear the driver's (not device's) station table */
+	iwl_clear_stations_table(priv);
+
+	priv->ieee_channels = NULL;
+	priv->ieee_rates = NULL;
+	priv->band = IEEE80211_BAND_2GHZ;
+
+	priv->iw_mode = NL80211_IFTYPE_STATION;
+
+	/* Choose which receivers/antennas to use */
+	if (priv->cfg->ops->hcmd->set_rxon_chain)
+		priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
+	iwl_init_scan_params(priv);
+
+	iwl_reset_qos(priv);
+
+	priv->qos_data.qos_active = 0;
+	priv->qos_data.qos_cap.val = 0;
+
+	priv->rates_mask = IWL_RATES_MASK;
+	/* Set tx_power_user_lmt to the lowest power level;
+	 * this value will get overwritten by the channel max power
+	 * average from EEPROM */
+	priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
+
+	ret = iwl_init_channel_map(priv);
+	if (ret) {
+		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = iwlcore_init_geos(priv);
+	if (ret) {
+		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+		goto err_free_channel_map;
+	}
+	iwl_init_hw_rates(priv, priv->ieee_rates);
+
+	return 0;
+
+err_free_channel_map:
+	iwl_free_channel_map(priv);
+err:
+	return ret;
+}
+
+static void iwl_uninit_drv(struct iwl_priv *priv)
+{
+	iwl_calib_free_results(priv);
+	iwlcore_free_geos(priv);
+	iwl_free_channel_map(priv);
+	kfree(priv->scan);
+}
+
 static struct attribute *iwl_sysfs_entries[] = {
 	&dev_attr_flags.attr,
 	&dev_attr_filter_flags.attr,
 	&dev_attr_statistics.attr,
 	&dev_attr_temperature.attr,
 	&dev_attr_tx_power.attr,
+	&dev_attr_rts_ht_protection.attr,
 #ifdef CONFIG_IWLWIFI_DEBUG
 	&dev_attr_debug_level.attr,
 #endif
@@ -2882,7 +3270,8 @@ static struct ieee80211_ops iwl_hw_ops = {
 	.reset_tsf = iwl_mac_reset_tsf,
 	.bss_info_changed = iwl_bss_info_changed,
 	.ampdu_action = iwl_mac_ampdu_action,
-	.hw_scan = iwl_mac_hw_scan
+	.hw_scan = iwl_mac_hw_scan,
+	.sta_notify = iwl_mac_sta_notify,
 };
 
 static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2990,12 +3379,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_iounmap;
 	}
 
-	/* amp init */
-	err = priv->cfg->ops->lib->apm_ops.init(priv);
-	if (err < 0) {
-		IWL_ERR(priv, "Failed to init APMG\n");
-		goto out_iounmap;
-	}
 	/*****************
 	 * 4. Read EEPROM
 	 *****************/
@@ -3141,6 +3524,15 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 		iwl_down(priv);
 	}
 
+	/*
+	 * Make sure the device is reset to low power before unloading the
+	 * driver.  This may be redundant with iwl_down(), but there are
+	 * paths to run iwl_down() without calling apm_ops.stop(), and there
+	 * are paths that avoid running iwl_down() at all before leaving the
+	 * driver.  This (inexpensive) call *makes sure* the device is reset.
+	 */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
 	iwl_tt_exit(priv);
 
 	/* make sure we flush any pending irq or
@@ -3203,37 +3595,97 @@ static struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
 #endif /* CONFIG_IWL4965 */
 #ifdef CONFIG_IWL5000
-	{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)},
-	{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)},
-	{IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)},
-	{IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)},
-	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)},
-	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)},
-	{IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)},
-	{IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
-	{IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)},
-	{IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)},
-/* 5350 WiFi/WiMax */
-	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)},
-	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)},
-	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)},
-/* 5150 Wifi/WiMax */
-	{IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)},
-	{IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)},
-/* 6000/6050 Series */
-	{IWL_PCI_DEVICE(0x008D, PCI_ANY_ID, iwl6000h_2agn_cfg)},
-	{IWL_PCI_DEVICE(0x008E, PCI_ANY_ID, iwl6000h_2agn_cfg)},
-	{IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)},
-	{IWL_PCI_DEVICE(0x422C, PCI_ANY_ID, iwl6000i_2agn_cfg)},
-	{IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)},
-	{IWL_PCI_DEVICE(0x4239, PCI_ANY_ID, iwl6000i_2agn_cfg)},
-	{IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)},
-	{IWL_PCI_DEVICE(0x0087, PCI_ANY_ID, iwl6050_2agn_cfg)},
-	{IWL_PCI_DEVICE(0x0088, PCI_ANY_ID, iwl6050_3agn_cfg)},
-	{IWL_PCI_DEVICE(0x0089, PCI_ANY_ID, iwl6050_2agn_cfg)},
+/* 5100 Series WiFi */
+	{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+
+/* 5300 Series WiFi */
+	{IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+
+/* 5350 Series WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+
+/* 5150 Series WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+
+	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+/* 6x00 Series */
+	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+/* 6x50 WiFi/WiMax Series */
+	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+
 /* 1000 Series WiFi */
-	{IWL_PCI_DEVICE(0x0083, PCI_ANY_ID, iwl1000_bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0084, PCI_ANY_ID, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
 #endif /* CONFIG_IWL5000 */
 
 	{0}
@@ -3288,9 +3740,9 @@ module_exit(iwl_exit);
 module_init(iwl_init);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug50, iwl_debug_level, uint, 0444);
+module_param_named(debug50, iwl_debug_level, uint, S_IRUGO);
 MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)");
-module_param_named(debug, iwl_debug_level, uint, 0644);
+module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "debug output mask");
 #endif
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index c4b565a2de94..95a57b36a7ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -132,6 +132,7 @@ void iwl_calib_free_results(struct iwl_priv *priv)
 		priv->calib_results[i].buf_len = 0;
 	}
 }
+EXPORT_SYMBOL(iwl_calib_free_results);
 
 /*****************************************************************************
  * RUNTIME calibrations framework
@@ -447,11 +448,11 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
 				cpu_to_le16((u16)data->nrg_th_ofdm);
 
 	cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
-				cpu_to_le16(190);
+				cpu_to_le16(data->barker_corr_th_min);
 	cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
-				cpu_to_le16(390);
+				cpu_to_le16(data->barker_corr_th_min_mrc);
 	cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
-				cpu_to_le16(62);
+				cpu_to_le16(data->nrg_th_cca);
 
 	IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
 			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
@@ -516,7 +517,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
 	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
 		data->nrg_silence_rssi[i] = 0;
 
-	data->auto_corr_ofdm = 90;
+	data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
 	data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
 	data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
 	data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
@@ -524,6 +525,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
 	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
 	data->nrg_th_cck = ranges->nrg_th_cck;
 	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+	data->barker_corr_th_min = ranges->barker_corr_th_min;
+	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+	data->nrg_th_cca = ranges->nrg_th_cca;
 
 	data->last_bad_plcp_cnt_ofdm = 0;
 	data->last_fa_cnt_ofdm = 0;
@@ -643,6 +647,15 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
 }
 EXPORT_SYMBOL(iwl_sensitivity_calibration);
 
+static inline u8 find_first_chain(u8 mask)
+{
+	if (mask & ANT_A)
+		return CHAIN_A;
+	if (mask & ANT_B)
+		return CHAIN_B;
+	return CHAIN_C;
+}
+
 /*
  * Accumulate 20 beacons of signal and noise statistics for each of
  *   3 receivers/antennas/rx-chains, then figure out:
@@ -675,14 +688,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 	u8 num_tx_chains;
 	unsigned long flags;
 	struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
+	u8 first_chain;
 
 	if (priv->disable_chain_noise_cal)
 		return;
 
 	data = &(priv->chain_noise_data);
 
-	/* Accumulate just the first 20 beacons after the first association,
-	 *   then we're done forever. */
+	/*
+	 * Accumulate just the first "chain_noise_num_beacons" beacons after
+	 * the first association, then we're done forever.
+	 */
 	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
 		if (data->state == IWL_CHAIN_NOISE_ALIVE)
 			IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
@@ -710,7 +726,10 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 		return;
 	}
 
-	/* Accumulate beacon statistics values across 20 beacons */
+	/*
+	 * Accumulate beacon statistics values across
+	 * "chain_noise_num_beacons" beacons
+	 */
 	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
 				IN_BAND_FILTER;
 	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
@@ -741,16 +760,19 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 	IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
 			chain_noise_a, chain_noise_b, chain_noise_c);
 
-	/* If this is the 20th beacon, determine:
+	/* If this is the "chain_noise_num_beacons"-th beacon, determine:
 	 * 1)  Disconnected antennas (using signal strengths)
 	 * 2)  Differential gain (using silence noise) to balance receivers */
-	if (data->beacon_count != CAL_NUM_OF_BEACONS)
+	if (data->beacon_count != priv->cfg->chain_noise_num_beacons)
 		return;
 
 	/* Analyze signal for disconnected antenna */
-	average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
-	average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
-	average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
+	average_sig[0] =
+		(data->chain_signal_a) / priv->cfg->chain_noise_num_beacons;
+	average_sig[1] =
+		(data->chain_signal_b) / priv->cfg->chain_noise_num_beacons;
+	average_sig[2] =
+		(data->chain_signal_c) / priv->cfg->chain_noise_num_beacons;
 
 	if (average_sig[0] >= average_sig[1]) {
 		max_average_sig = average_sig[0];
@@ -803,13 +825,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 			/* there is a Tx antenna connected */
 			break;
 		if (num_tx_chains == priv->hw_params.tx_chains_num &&
-		data->disconn_array[i]) {
-			/* This is the last TX antenna and is also
-			 * disconnected connect it anyway */
-			data->disconn_array[i] = 0;
-			active_chains |= ant_msk;
-			IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - "
-				"declare %d as connected\n", i);
+		    data->disconn_array[i]) {
+			/*
+			 * If all chains are disconnected,
+			 * connect the first valid tx chain
+			 */
+			first_chain =
+				find_first_chain(priv->cfg->valid_tx_ant);
+			data->disconn_array[first_chain] = 0;
+			active_chains |= BIT(first_chain);
+			IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
+					first_chain);
 			break;
 		}
 	}
@@ -820,9 +846,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 			active_chains);
 
 	/* Analyze noise for rx balance */
-	average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
-	average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
-	average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
+	average_noise[0] =
+		((data->chain_noise_a) / priv->cfg->chain_noise_num_beacons);
+	average_noise[1] =
+		((data->chain_noise_b) / priv->cfg->chain_noise_num_beacons);
+	average_noise[2] =
+		((data->chain_noise_c) / priv->cfg->chain_noise_num_beacons);
 
 	for (i = 0; i < NUM_RX_CHAINS; i++) {
 		if (!(data->disconn_array[i]) &&
@@ -843,7 +872,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 
 	if (priv->cfg->ops->utils->gain_computation)
 		priv->cfg->ops->utils->gain_computation(priv, average_noise,
-			min_average_noise_antenna_i, min_average_noise);
+				min_average_noise_antenna_i, min_average_noise,
+				find_first_chain(priv->cfg->valid_rx_ant));
 
 	/* Some power changes may have been made during the calibration.
 	 * Update and commit the RXON
@@ -870,7 +900,7 @@ void iwl_reset_run_time_calib(struct iwl_priv *priv)
 
 	/* Ask for statistics now, the uCode will send notification
 	 * periodically after association */
-	iwl_send_statistics_request(priv, CMD_ASYNC);
+	iwl_send_statistics_request(priv, CMD_ASYNC, true);
 }
 EXPORT_SYMBOL(iwl_reset_run_time_calib);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 4afaf773aeac..e91507531923 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,11 +109,12 @@ enum {
 	REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
 
 	/* WiMAX coexistence */
-	COEX_PRIORITY_TABLE_CMD = 0x5a,	/*5000 only */
+	COEX_PRIORITY_TABLE_CMD = 0x5a,	/* for 5000 series and up */
 	COEX_MEDIUM_NOTIFICATION = 0x5b,
 	COEX_EVENT_CMD = 0x5c,
 
 	/* Calibration */
+	TEMPERATURE_NOTIFICATION = 0x62,
 	CALIBRATION_CFG_CMD = 0x65,
 	CALIBRATION_RES_NOTIFICATION = 0x66,
 	CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
@@ -148,7 +149,7 @@ enum {
 	QUIET_NOTIFICATION = 0x96,		/* not used */
 	REPLY_TX_PWR_TABLE_CMD = 0x97,
 	REPLY_TX_POWER_DBM_CMD_V1 = 0x98,	/* old version of API */
-	TX_ANT_CONFIGURATION_CMD = 0x98,	/* not used */
+	TX_ANT_CONFIGURATION_CMD = 0x98,
 	MEASURE_ABORT_NOTIFICATION = 0x99,	/* not used */
 
 	/* Bluetooth device coexistence config command */
@@ -353,6 +354,9 @@ struct iwl3945_power_per_rate {
 #define POWER_TABLE_NUM_HT_OFDM_ENTRIES		32
 #define POWER_TABLE_CCK_ENTRY			32
 
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES		24
+#define IWL_PWR_CCK_ENTRIES			2
+
 /**
  * union iwl4965_tx_power_dual_stream
  *
@@ -411,6 +415,16 @@ struct iwl5000_tx_power_dbm_cmd {
 	u8 reserved;
 } __attribute__ ((packed));
 
+/**
+ * Command TX_ANT_CONFIGURATION_CMD = 0x98
+ * This command is used to configure the valid Tx antennas.
+ * By default uCode determines the valid antennas according to the radio flavor.
+ * This command enables the driver to override/modify this conclusion.
+ */
+struct iwl_tx_ant_config_cmd {
+	__le32 valid;
+} __attribute__ ((packed));
+
 /******************************************************************************
  * (0a)
  * Alive and Error Commands & Responses:
@@ -793,7 +807,7 @@ struct iwl3945_channel_switch_cmd {
 	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
 } __attribute__ ((packed));
 
-struct iwl_channel_switch_cmd {
+struct iwl4965_channel_switch_cmd {
 	u8 band;
 	u8 expect_beacon;
 	__le16 channel;
@@ -803,6 +817,48 @@ struct iwl_channel_switch_cmd {
 	struct iwl4965_tx_power_db tx_power;
 } __attribute__ ((packed));
 
+/**
+ * struct iwl5000_channel_switch_cmd
+ * @band: 0- 5.2GHz, 1- 2.4GHz
+ * @expect_beacon: 0- resume transmits after channel switch
+ *		   1- wait for beacon to resume transmits
+ * @channel: new channel number
+ * @rxon_flags: Rx on flags
+ * @rxon_filter_flags: filtering parameters
+ * @switch_time: switch time in extended beacon format
+ * @reserved: reserved bytes
+ */
+struct iwl5000_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
+} __attribute__ ((packed));
+
+/**
+ * struct iwl6000_channel_switch_cmd
+ * @band: 0- 5.2GHz, 1- 2.4GHz
+ * @expect_beacon: 0- resume transmits after channel switch
+ *		   1- wait for beacon to resume transmits
+ * @channel: new channel number
+ * @rxon_flags: Rx on flags
+ * @rxon_filter_flags: filtering parameters
+ * @switch_time: switch time in extended beacon format
+ * @reserved: reserved bytes
+ */
+struct iwl6000_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
+} __attribute__ ((packed));
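
/*
 * Illustrative sketch, not part of this patch: populating the 5000-series
 * channel switch command.  The helper name and the use of staging_rxon are
 * assumptions; only the struct layout and the band/expect_beacon semantics
 * come from the kerneldoc above.
 */
static void ex_fill_5000_channel_switch(struct iwl_priv *priv,
					struct iwl5000_channel_switch_cmd *cmd,
					u16 new_channel, u32 switch_time)
{
	cmd->band = (priv->band == IEEE80211_BAND_2GHZ) ? 1 : 0;
	cmd->expect_beacon = 1;		/* wait for a beacon before resuming TX */
	cmd->channel = cpu_to_le16(new_channel);
	cmd->rxon_flags = priv->staging_rxon.flags;
	cmd->rxon_filter_flags = priv->staging_rxon.filter_flags;
	cmd->switch_time = cpu_to_le32(switch_time);
}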
+
 /*
  * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
  */
@@ -921,6 +977,7 @@ struct iwl_qosparam_cmd {
 #define	STA_MODIFY_TX_RATE_MSK		0x04
 #define STA_MODIFY_ADDBA_TID_MSK	0x08
 #define STA_MODIFY_DELBA_TID_MSK	0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20
 
 /* Receiver address (actually, Rx station's index into station table),
  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
@@ -1051,7 +1108,14 @@ struct iwl4965_addsta_cmd {
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
 	__le16 add_immediate_ba_ssn;
 
-	__le32 reserved2;
+	/*
+	 * Number of packets OK to transmit to station even though
+	 * it is asleep -- used to synchronise PS-poll and u-APSD
+	 * responses while ucode keeps track of STA sleep state.
+	 */
+	__le16 sleep_tx_count;
+
+	__le16 reserved2;
 } __attribute__ ((packed));
 
 /* 5000 */
@@ -1082,7 +1146,14 @@ struct iwl_addsta_cmd {
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
 	__le16 add_immediate_ba_ssn;
 
-	__le32 reserved2;
+	/*
+	 * Number of packets OK to transmit to station even though
+	 * it is asleep -- used to synchronise PS-poll and u-APSD
+	 * responses while ucode keeps track of STA sleep state.
+	 */
+	__le16 sleep_tx_count;
+
+	__le16 reserved2;
 } __attribute__ ((packed));
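
/*
 * Illustrative sketch, not part of this patch: how the new sleep_tx_count
 * field might be driven from the PS-poll/u-APSD path.  Apart from
 * sleep_tx_count and STA_MODIFY_SLEEP_TX_COUNT_MSK, the field names
 * (mode, sta.modify_mask, sta.sta_id), STA_CONTROL_MODIFY_MSK and
 * iwl_send_add_sta() are assumed from the surrounding driver, not this hunk.
 */
static void ex_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id,
					 int cnt)
{
	struct iwl_addsta_cmd cmd = {
		.mode = STA_CONTROL_MODIFY_MSK,
		.sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
		.sta.sta_id = sta_id,
		.sleep_tx_count = cpu_to_le16(cnt),
	};

	iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}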
 
 
@@ -1634,6 +1705,21 @@ enum {
 	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
 };
 
+static inline u32 iwl_tx_status_to_mac80211(u32 status)
+{
+	status &= TX_STATUS_MSK;
+
+	switch (status) {
+	case TX_STATUS_SUCCESS:
+	case TX_STATUS_DIRECT_DONE:
+		return IEEE80211_TX_STAT_ACK;
+	case TX_STATUS_FAIL_DEST_PS:
+		return IEEE80211_TX_STAT_TX_FILTERED;
+	default:
+		return 0;
+	}
+}
+
 static inline bool iwl_is_tx_success(u32 status)
 {
 	status &= TX_STATUS_MSK;
@@ -2162,6 +2248,19 @@ struct iwl_link_quality_cmd {
 	__le32 reserved2;
 } __attribute__ ((packed));
 
+#define BT_COEX_DISABLE (0x0)
+#define BT_COEX_MODE_2W (0x1)
+#define BT_COEX_MODE_3W (0x2)
+#define BT_COEX_MODE_4W (0x3)
+
+#define BT_LEAD_TIME_MIN (0x0)
+#define BT_LEAD_TIME_DEF (0x1E)
+#define BT_LEAD_TIME_MAX (0xFF)
+
+#define BT_MAX_KILL_MIN (0x1)
+#define BT_MAX_KILL_DEF (0x5)
+#define BT_MAX_KILL_MAX (0xFF)
+
 /*
  * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
  *
@@ -2497,9 +2596,10 @@ struct iwl_scan_channel {
 /**
  * struct iwl_ssid_ie - directed scan network information element
  *
- * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field
- * in struct iwl_scan_channel; each channel may select different ssids from
- * among the 4 entries.  SSID IEs get transmitted in reverse order of entry.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: only 4 are in the
+ * 3945 SCAN API), selected by the "type" bit field in struct iwl_scan_channel;
+ * each channel may select different SSIDs from among the 20 (4) entries.
+ * SSID IEs get transmitted in reverse order of entry.
  */
 struct iwl_ssid_ie {
 	u8 id;
@@ -3001,6 +3101,10 @@ struct statistics_general {
 	__le32 reserved3;
 } __attribute__ ((packed));
 
+#define UCODE_STATISTICS_CLEAR_MSK		(0x1 << 0)
+#define UCODE_STATISTICS_FREQUENCY_MSK		(0x1 << 1)
+#define UCODE_STATISTICS_NARROW_BAND_MSK	(0x1 << 2)
+
 /*
  * REPLY_STATISTICS_CMD = 0x9c,
  * 3945 and 4965 identical.
@@ -3237,12 +3341,6 @@ struct iwl_missed_beacon_notif {
  *   Lower values mean higher energy; this means making sure that the value
  *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
  *
- * Driver should set the following entries to fixed values:
- *
- *   HD_MIN_ENERGY_OFDM_DET_INDEX               100
- *   HD_BARKER_CORR_TH_ADD_MIN_INDEX            190
- *   HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX        390
- *   HD_OFDM_ENERGY_TH_IN_INDEX                  62
  */
 
 /*
@@ -3440,30 +3538,134 @@ struct iwl_led_cmd {
 } __attribute__ ((packed));
 
 /*
- * Coexistence WIFI/WIMAX  Command
- * COEX_PRIORITY_TABLE_CMD = 0x5a
- *
+ * Station priority table entries;
+ * also used as potential "events" values for both
+ * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
+ */
+
+/*
+ * COEX events entry flag masks
+ * RP - Requested Priority
+ * WP - Win Medium Priority: priority assigned when the contention has been won
+ */
+#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG        (0x1)
+#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG        (0x2)
+#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG  (0x4)
+
+#define COEX_CU_UNASSOC_IDLE_RP               4
+#define COEX_CU_UNASSOC_MANUAL_SCAN_RP        4
+#define COEX_CU_UNASSOC_AUTO_SCAN_RP          4
+#define COEX_CU_CALIBRATION_RP                4
+#define COEX_CU_PERIODIC_CALIBRATION_RP       4
+#define COEX_CU_CONNECTION_ESTAB_RP           4
+#define COEX_CU_ASSOCIATED_IDLE_RP            4
+#define COEX_CU_ASSOC_MANUAL_SCAN_RP          4
+#define COEX_CU_ASSOC_AUTO_SCAN_RP            4
+#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP         4
+#define COEX_CU_RF_ON_RP                      6
+#define COEX_CU_RF_OFF_RP                     4
+#define COEX_CU_STAND_ALONE_DEBUG_RP          6
+#define COEX_CU_IPAN_ASSOC_LEVEL_RP           4
+#define COEX_CU_RSRVD1_RP                     4
+#define COEX_CU_RSRVD2_RP                     4
+
+#define COEX_CU_UNASSOC_IDLE_WP               3
+#define COEX_CU_UNASSOC_MANUAL_SCAN_WP        3
+#define COEX_CU_UNASSOC_AUTO_SCAN_WP          3
+#define COEX_CU_CALIBRATION_WP                3
+#define COEX_CU_PERIODIC_CALIBRATION_WP       3
+#define COEX_CU_CONNECTION_ESTAB_WP           3
+#define COEX_CU_ASSOCIATED_IDLE_WP            3
+#define COEX_CU_ASSOC_MANUAL_SCAN_WP          3
+#define COEX_CU_ASSOC_AUTO_SCAN_WP            3
+#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP         3
+#define COEX_CU_RF_ON_WP                      3
+#define COEX_CU_RF_OFF_WP                     3
+#define COEX_CU_STAND_ALONE_DEBUG_WP          6
+#define COEX_CU_IPAN_ASSOC_LEVEL_WP           3
+#define COEX_CU_RSRVD1_WP                     3
+#define COEX_CU_RSRVD2_WP                     3
+
+#define COEX_UNASSOC_IDLE_FLAGS                     0
+#define COEX_UNASSOC_MANUAL_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_UNASSOC_AUTO_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_CALIBRATION_FLAGS			\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_PERIODIC_CALIBRATION_FLAGS             0
+/*
+ * COEX_CONNECTION_ESTAB:
+ * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from the network.
+ */
+#define COEX_CONNECTION_ESTAB_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
+	COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
+#define COEX_ASSOCIATED_IDLE_FLAGS                  0
+#define COEX_ASSOC_MANUAL_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_ASSOC_AUTO_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS               0
+#define COEX_RF_ON_FLAGS                            0
+#define COEX_RF_OFF_FLAGS                           0
+#define COEX_STAND_ALONE_DEBUG_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_IPAN_ASSOC_LEVEL_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
+	 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
+#define COEX_RSRVD1_FLAGS                           0
+#define COEX_RSRVD2_FLAGS                           0
+/*
+ * COEX_CU_RF_ON is the event wrapping all radio ownership.
+ * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from the network.
  */
+#define COEX_CU_RF_ON_FLAGS			\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
+	 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
+
+
 enum {
+	/* un-association part */
 	COEX_UNASSOC_IDLE		= 0,
 	COEX_UNASSOC_MANUAL_SCAN	= 1,
 	COEX_UNASSOC_AUTO_SCAN		= 2,
+	/* calibration */
 	COEX_CALIBRATION		= 3,
 	COEX_PERIODIC_CALIBRATION	= 4,
+	/* connection */
 	COEX_CONNECTION_ESTAB		= 5,
+	/* association part */
 	COEX_ASSOCIATED_IDLE		= 6,
 	COEX_ASSOC_MANUAL_SCAN		= 7,
 	COEX_ASSOC_AUTO_SCAN		= 8,
 	COEX_ASSOC_ACTIVE_LEVEL		= 9,
+	/* RF ON/OFF */
 	COEX_RF_ON			= 10,
 	COEX_RF_OFF			= 11,
 	COEX_STAND_ALONE_DEBUG		= 12,
+	/* IPAN */
 	COEX_IPAN_ASSOC_LEVEL		= 13,
+	/* reserved */
 	COEX_RSRVD1			= 14,
 	COEX_RSRVD2			= 15,
 	COEX_NUM_OF_EVENTS		= 16
 };
 
+/*
+ * Coexistence WIFI/WIMAX  Command
+ * COEX_PRIORITY_TABLE_CMD = 0x5a
+ *
+ */
 struct iwl_wimax_coex_event_entry {
 	u8 request_prio;
 	u8 win_medium_prio;
@@ -3488,6 +3690,55 @@ struct iwl_wimax_coex_cmd {
 	struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
 } __attribute__ ((packed));
 
+/*
+ * Coexistence MEDIUM NOTIFICATION
+ * COEX_MEDIUM_NOTIFICATION = 0x5b
+ *
+ * notification from uCode to host to indicate medium changes
+ *
+ */
+/*
+ * status field
+ * bit 0 - 2: medium status
+ * bit 3: medium change indication
+ * bit 4 - 31: reserved
+ */
+/* status option values, (0 - 2 bits) */
+#define COEX_MEDIUM_BUSY	(0x0) /* radio belongs to WiMAX */
+#define COEX_MEDIUM_ACTIVE	(0x1) /* radio belongs to WiFi */
+#define COEX_MEDIUM_PRE_RELEASE	(0x2) /* received radio release */
+#define COEX_MEDIUM_MSK		(0x7)
+
+/* send notification status (1 bit) */
+#define COEX_MEDIUM_CHANGED	(0x8)
+#define COEX_MEDIUM_CHANGED_MSK	(0x8)
+#define COEX_MEDIUM_SHIFT	(3)
+
+struct iwl_coex_medium_notification {
+	__le32 status;
+	__le32 events;
+} __attribute__ ((packed));
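
/*
 * Illustrative sketch, not part of this patch: decoding the status word of a
 * COEX_MEDIUM_NOTIFICATION with the masks defined above.  The handler name
 * is hypothetical; only the bit layout comes from this header.
 */
static void ex_handle_coex_medium_notif(struct iwl_priv *priv,
				struct iwl_coex_medium_notification *notif)
{
	u32 status = le32_to_cpu(notif->status);
	u32 medium = status & COEX_MEDIUM_MSK;

	if (!(status & COEX_MEDIUM_CHANGED_MSK))
		return;		/* no ownership change announced */

	IWL_DEBUG_INFO(priv, "coex medium changed: %s (events 0x%x)\n",
		       medium == COEX_MEDIUM_BUSY ? "owned by WiMAX" :
		       medium == COEX_MEDIUM_ACTIVE ? "owned by WiFi" :
		       "pre-release", le32_to_cpu(notif->events));
}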
+
+/*
+ * Coexistence EVENT  Command
+ * COEX_EVENT_CMD = 0x5c
+ *
+ * sent from host to uCode to request a coex event.
+ */
+/* flags options */
+#define COEX_EVENT_REQUEST_MSK	(0x1)
+
+struct iwl_coex_event_cmd {
+	u8 flags;
+	u8 event;
+	__le16 reserved;
+} __attribute__ ((packed));
+
+struct iwl_coex_event_resp {
+	__le32 status;
+} __attribute__ ((packed));
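
/*
 * Illustrative sketch, not part of this patch: issuing a coex event request.
 * The helper name is hypothetical; iwl_send_cmd_pdu() and its argument order
 * are taken from its use elsewhere in this series.
 */
static int ex_send_coex_event(struct iwl_priv *priv, u8 event)
{
	struct iwl_coex_event_cmd cmd = {
		.flags = COEX_EVENT_REQUEST_MSK,
		.event = event,		/* e.g. COEX_CONNECTION_ESTAB */
	};

	return iwl_send_cmd_pdu(priv, COEX_EVENT_CMD, sizeof(cmd), &cmd);
}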
+
+
 /******************************************************************************
  * (13)
  * Union of all expected notifications/responses:
@@ -3495,6 +3746,16 @@ struct iwl_wimax_coex_cmd {
  *****************************************************************************/
 
 struct iwl_rx_packet {
+	/*
+	 * The first 4 bytes of the RX frame header contain both the RX frame
+	 * size and some flags.
+	 * Bit fields:
+	 * 31:    flag flush RB request
+	 * 30:    flag ignore TC (terminal counter) request
+	 * 29:    flag fast IRQ request
+	 * 28-14: Reserved
+	 * 13-00: RX frame size
+	 */
 	__le32 len_n_flags;
 	struct iwl_cmd_header hdr;
 	union {
@@ -3514,6 +3775,8 @@ struct iwl_rx_packet {
 		struct iwl_notif_statistics stats;
 		struct iwl_compressed_ba_resp compressed_ba;
 		struct iwl_missed_beacon_notif missed_beacon;
+		struct iwl_coex_medium_notification coex_medium_notif;
+		struct iwl_coex_event_resp coex_event;
 		__le32 status;
 		u8 raw[0];
 	} u;
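
/*
 * Illustrative sketch, not part of this patch: extracting the RX frame size
 * from len_n_flags according to the bit layout documented above (bits 13:0).
 * The 0x3fff mask is derived from that comment, not from a named constant
 * in this hunk.
 */
static inline u16 ex_rx_packet_len(struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & 0x3fff;	/* bits 13-00 */
}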
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2dc928755454..574d36658702 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -47,6 +47,37 @@ MODULE_VERSION(IWLWIFI_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
+static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
+	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
+	 0, COEX_UNASSOC_IDLE_FLAGS},
+	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
+	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
+	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
+	 0, COEX_CALIBRATION_FLAGS},
+	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
+	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
+	 0, COEX_CONNECTION_ESTAB_FLAGS},
+	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
+	 0, COEX_ASSOCIATED_IDLE_FLAGS},
+	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
+	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
+	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
+	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
+	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
+	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
+	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
+	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
+	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
+};
+
 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
 	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
 				    IWL_RATE_SISO_##s##M_PLCP, \
@@ -178,6 +209,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
 	}
 	return ant;
 }
+EXPORT_SYMBOL(iwl_toggle_tx_ant);
 
 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 EXPORT_SYMBOL(iwl_bcast_addr);
@@ -224,7 +256,10 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
 	/* nic_init */
 	spin_lock_irqsave(&priv->lock, flags);
 	priv->cfg->ops->lib->apm_ops.init(priv);
-	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
+
+	/* Set interrupt coalescing timer to 512 usecs */
+	iwl_write8(priv, CSR_INT_COALESCING, 512 / 32);
+
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
@@ -416,8 +451,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
 		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
 	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
 	ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
-			     (WLAN_HT_CAP_SM_PS_DISABLED << 2));
-
+			     (priv->cfg->sm_ps_mode << 2));
 	max_bit_rate = MAX_BIT_RATE_20_MHZ;
 	if (priv->hw_params.ht40_channel & BIT(band)) {
 		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -452,28 +486,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
 	}
 }
 
-static void iwlcore_init_hw_rates(struct iwl_priv *priv,
-			      struct ieee80211_rate *rates)
-{
-	int i;
-
-	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
-		rates[i].bitrate = iwl_rates[i].ieee * 5;
-		rates[i].hw_value = i; /* Rate scaling will work on indexes */
-		rates[i].hw_value_short = i;
-		rates[i].flags = 0;
-		if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
-			/*
-			 * If CCK != 1M then set short preamble rate flag.
-			 */
-			rates[i].flags |=
-				(iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
-					0 : IEEE80211_RATE_SHORT_PREAMBLE;
-		}
-	}
-}
-
-
 /**
  * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
  */
@@ -605,11 +617,27 @@ void iwlcore_free_geos(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwlcore_free_geos);
 
+/*
+ *  iwlcore_rts_tx_cmd_flag: Set RTS/CTS flags.  Only 3945 and 4965 share
+ *  this function.
+ */
+void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+				__le32 *tx_flags)
+{
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+		*tx_flags |= TX_CMD_FLG_RTS_MSK;
+		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+		*tx_flags |= TX_CMD_FLG_CTS_MSK;
+	}
+}
+EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
+
 static bool is_single_rx_stream(struct iwl_priv *priv)
 {
 	return !priv->current_ht_config.is_ht ||
-	       ((priv->current_ht_config.mcs.rx_mask[1] == 0) &&
-		(priv->current_ht_config.mcs.rx_mask[2] == 0));
+	       priv->current_ht_config.single_chain_sufficient;
 }
 
 static u8 iwl_is_channel_extension(struct iwl_priv *priv,
@@ -635,10 +663,9 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
 u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 			 struct ieee80211_sta_ht_cap *sta_ht_inf)
 {
-	struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 
-	if ((!iwl_ht_conf->is_ht) ||
-	    (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ))
+	if (!ht_conf->is_ht || !ht_conf->is_40mhz)
 		return 0;
 
 	/* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
@@ -654,7 +681,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 #endif
 	return iwl_is_channel_extension(priv, priv->band,
 			le16_to_cpu(priv->staging_rxon.channel),
-			iwl_ht_conf->extension_chan_offset);
+			ht_conf->extension_chan_offset);
 }
 EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
 
@@ -878,11 +905,11 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
 
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 {
 	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 
-	if (!ht_info->is_ht) {
+	if (!ht_conf->is_ht) {
 		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
 			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
 			RXON_FLG_HT40_PROT_MSK |
@@ -893,7 +920,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
 	/* FIXME: if the definition of ht_protection changed, the "translation"
 	 * will be needed for rxon->flags
 	 */
-	rxon->flags |= cpu_to_le32(ht_info->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
+	rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
 
 	/* Set up channel bandwidth:
 	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
@@ -902,10 +929,10 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
 			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
 	if (iwl_is_ht40_tx_allowed(priv, NULL)) {
 		/* pure ht40 */
-		if (ht_info->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+		if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
 			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
 			/* Note: control channel is opposite of extension channel */
-			switch (ht_info->extension_chan_offset) {
+			switch (ht_conf->extension_chan_offset) {
 			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
 				break;
@@ -915,7 +942,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
 			}
 		} else {
 			/* Note: control channel is opposite of extension channel */
-			switch (ht_info->extension_chan_offset) {
+			switch (ht_conf->extension_chan_offset) {
 			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
 				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
@@ -938,14 +965,10 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
 		priv->cfg->ops->hcmd->set_rxon_chain(priv);
 
-	IWL_DEBUG_ASSOC(priv, "supported HT rate 0x%X 0x%X 0x%X "
-			"rxon flags 0x%X operation mode :0x%X "
+	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
 			"extension channel offset 0x%x\n",
-			ht_info->mcs.rx_mask[0],
-			ht_info->mcs.rx_mask[1],
-			ht_info->mcs.rx_mask[2],
-			le32_to_cpu(rxon->flags), ht_info->ht_protection,
-			ht_info->extension_chan_offset);
+			le32_to_cpu(rxon->flags), ht_conf->ht_protection,
+			ht_conf->extension_chan_offset);
 	return;
 }
 EXPORT_SYMBOL(iwl_set_rxon_ht);
@@ -955,44 +978,50 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
 #define IWL_NUM_IDLE_CHAINS_DUAL	2
 #define IWL_NUM_IDLE_CHAINS_SINGLE	1
 
-/* Determine how many receiver/antenna chains to use.
- * More provides better reception via diversity.  Fewer saves power.
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity.  Fewer saves power
+ * at the expense of throughput, though this matters only when the
+ * device is not already in powersave.
+ *
  * MIMO (dual stream) requires at least 2, but works better with 3.
  * This does not determine *which* chains to use, just how many.
  */
 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
 {
-	bool is_single = is_single_rx_stream(priv);
-	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-
 	/* # of Rx chains to use when expecting MIMO. */
-	if (is_single || (!is_cam && (priv->current_ht_config.sm_ps ==
-						 WLAN_HT_CAP_SM_PS_STATIC)))
+	if (is_single_rx_stream(priv))
 		return IWL_NUM_RX_CHAINS_SINGLE;
 	else
 		return IWL_NUM_RX_CHAINS_MULTIPLE;
 }
 
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
 static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
 {
-	int idle_cnt;
+	int idle_cnt = active_cnt;
 	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+
 	/* # Rx chains when idling and maybe trying to save power */
-	switch (priv->current_ht_config.sm_ps) {
+	switch (priv->cfg->sm_ps_mode) {
 	case WLAN_HT_CAP_SM_PS_STATIC:
+		idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
+		break;
 	case WLAN_HT_CAP_SM_PS_DYNAMIC:
 		idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
-					IWL_NUM_IDLE_CHAINS_SINGLE;
+			IWL_NUM_IDLE_CHAINS_SINGLE;
 		break;
 	case WLAN_HT_CAP_SM_PS_DISABLED:
-		idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
 		break;
 	case WLAN_HT_CAP_SM_PS_INVALID:
 	default:
-		IWL_ERR(priv, "invalid mimo ps mode %d\n",
-			   priv->current_ht_config.sm_ps);
+		IWL_ERR(priv, "invalid sm_ps mode %u\n",
+			priv->cfg->sm_ps_mode);
 		WARN_ON(1);
-		idle_cnt = -1;
 		break;
 	}
 	return idle_cnt;
@@ -1005,7 +1034,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
 	res = (chain_bitmap & BIT(0)) >> 0;
 	res += (chain_bitmap & BIT(1)) >> 1;
 	res += (chain_bitmap & BIT(2)) >> 2;
-	res += (chain_bitmap & BIT(4)) >> 4;
+	res += (chain_bitmap & BIT(3)) >> 3;
 	return res;
 }
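
/*
 * Illustrative standalone check, not part of this patch: the one-line change
 * above fixes an off-by-one -- the fourth chain lives in bit 3 of the
 * bitmap, not bit 4, so the old code never counted it.
 */
#include <stdio.h>

#define BIT(n)	(1u << (n))

static unsigned int count_chain_bitmap(unsigned int chain_bitmap)
{
	unsigned int res;

	res  = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;	/* was BIT(4) >> 4 */
	return res;
}

int main(void)
{
	printf("chains in 0x0f: %u\n", count_chain_bitmap(0x0f));	/* now 4, was 3 */
	printf("chains in 0x07: %u\n", count_chain_bitmap(0x07));	/* 3 either way */
	return 0;
}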
 
@@ -1281,18 +1310,28 @@ static void iwl_set_rate(struct iwl_priv *priv)
 
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
 	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-	IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n",
-		      le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
-	rxon->channel = csa->channel;
-	priv->staging_rxon.channel = csa->channel;
+
+	if (priv->switch_rxon.switch_in_progress) {
+		if (!le32_to_cpu(csa->status) &&
+		    (csa->channel == priv->switch_rxon.channel)) {
+			rxon->channel = csa->channel;
+			priv->staging_rxon.channel = csa->channel;
+			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+			      le16_to_cpu(csa->channel));
+		} else
+			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+			      le16_to_cpu(csa->channel));
+
+		priv->switch_rxon.switch_in_progress = false;
+	}
 }
 EXPORT_SYMBOL(iwl_rx_csa);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
+void iwl_print_rx_config_cmd(struct iwl_priv *priv)
 {
 	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 
@@ -1310,6 +1349,7 @@ static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
 	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
 	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
 }
+EXPORT_SYMBOL(iwl_print_rx_config_cmd);
 #endif
 /**
  * iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1322,12 +1362,11 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
 	/* Cancel currently queued command. */
 	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
 
+	priv->cfg->ops->lib->dump_nic_error_log(priv);
+	priv->cfg->ops->lib->dump_nic_event_log(priv, false);
 #ifdef CONFIG_IWLWIFI_DEBUG
-	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) {
-		priv->cfg->ops->lib->dump_nic_error_log(priv);
-		priv->cfg->ops->lib->dump_nic_event_log(priv);
+	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
 		iwl_print_rx_config_cmd(priv);
-	}
 #endif
 
 	wake_up_interruptible(&priv->wait_command_queue);
@@ -1346,6 +1385,160 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_irq_handle_error);
 
+int iwl_apm_stop_master(struct iwl_priv *priv)
+{
+	int ret = 0;
+
+	/* stop device's busmaster DMA activity */
+	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+	if (ret)
+		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
+
+	IWL_DEBUG_INFO(priv, "stop master\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(iwl_apm_stop_master);
+
+void iwl_apm_stop(struct iwl_priv *priv)
+{
+	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
+
+	/* Stop device's DMA activity */
+	iwl_apm_stop_master(priv);
+
+	/* Reset the entire device */
+	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+	udelay(10);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(iwl_apm_stop);
+
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int iwl_apm_init(struct iwl_priv *priv)
+{
+	int ret = 0;
+	u16 lctl;
+
+	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
	 * NOTE:  This is a no-op for 3945 (non-existent bit)
+	 */
+	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	/*
+	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so the device moves directly L0->L1;
+	 *    costs a negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	if (priv->cfg->set_l0s) {
+		lctl = iwl_pcie_link_ctl(priv);
+		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+			/* L1-ASPM enabled; disable(!) L0S  */
+			iwl_set_bit(priv, CSR_GIO_REG,
+					CSR_GIO_REG_VAL_L0S_ENABLED);
+			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+		} else {
+			/* L1-ASPM disabled; enable(!) L0S */
+			iwl_clear_bit(priv, CSR_GIO_REG,
+					CSR_GIO_REG_VAL_L0S_ENABLED);
+			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+		}
+	}
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (priv->cfg->pll_cfg_val)
+		iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwl_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+		goto out;
+	}
+
+	/*
+	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
+	 * later devices (i.e. 5000 and later) have non-volatile SRAM,
+	 * and don't need BSM to restore data after power-saving sleep.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+	 * do not disable clocks.  This preserves any hardware bits already
+	 * set by default in "CLK_CTRL_REG" after reset.
+	 */
+	if (priv->cfg->use_bsm)
+		iwl_write_prph(priv, APMG_CLK_EN_REG,
+			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+	else
+		iwl_write_prph(priv, APMG_CLK_EN_REG,
+			APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(20);
+
+	/* Disable L1-Active */
+	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(iwl_apm_init);
+
+
+
 void iwl_configure_filter(struct ieee80211_hw *hw,
 			  unsigned int changed_flags,
 			  unsigned int *total_flags,
@@ -1393,73 +1586,14 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(iwl_configure_filter);
 
-int iwl_setup_mac(struct iwl_priv *priv)
-{
-	int ret;
-	struct ieee80211_hw *hw = priv->hw;
-	hw->rate_control_algorithm = "iwl-agn-rs";
-
-	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_NOISE_DBM |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_SPECTRUM_MGMT;
-
-	if (!priv->cfg->broken_powersave)
-		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
-	hw->wiphy->interface_modes =
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC);
-
-	hw->wiphy->custom_regulatory = true;
-
-	/* Firmware does not support this */
-	hw->wiphy->disable_beacon_hints = true;
-
-	/*
-	 * For now, disable PS by default because it affects
-	 * RX performance significantly.
-	 */
-	hw->wiphy->ps_default = false;
-
-	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
-	/* we create the 802.11 header and a zero-length SSID element */
-	hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
-
-	/* Default value; 4 EDCA QOS priorities */
-	hw->queues = 4;
-
-	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&priv->bands[IEEE80211_BAND_2GHZ];
-	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&priv->bands[IEEE80211_BAND_5GHZ];
-
-	ret = ieee80211_register_hw(priv->hw);
-	if (ret) {
-		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-		return ret;
-	}
-	priv->mac80211_registered = 1;
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_setup_mac);
-
 int iwl_set_hw_params(struct iwl_priv *priv)
 {
 	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
 	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
 	if (priv->cfg->mod_params->amsdu_size_8K)
-		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
+		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
 	else
-		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
-	priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
+		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
 
 	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
 
@@ -1471,71 +1605,6 @@ int iwl_set_hw_params(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_set_hw_params);
 
-int iwl_init_drv(struct iwl_priv *priv)
-{
-	int ret;
-
-	priv->ibss_beacon = NULL;
-
-	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->sta_lock);
-	spin_lock_init(&priv->hcmd_lock);
-
-	INIT_LIST_HEAD(&priv->free_frames);
-
-	mutex_init(&priv->mutex);
-
-	/* Clear the driver's (not device's) station table */
-	iwl_clear_stations_table(priv);
-
-	priv->data_retry_limit = -1;
-	priv->ieee_channels = NULL;
-	priv->ieee_rates = NULL;
-	priv->band = IEEE80211_BAND_2GHZ;
-
-	priv->iw_mode = NL80211_IFTYPE_STATION;
-
-	priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
-
-	/* Choose which receivers/antennas to use */
-	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
-	iwl_init_scan_params(priv);
-
-	iwl_reset_qos(priv);
-
-	priv->qos_data.qos_active = 0;
-	priv->qos_data.qos_cap.val = 0;
-
-	priv->rates_mask = IWL_RATES_MASK;
-	/* Set the tx_power_user_lmt to the lowest power level
-	 * this value will get overwritten by channel max power avg
-	 * from eeprom */
-	priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
-
-	ret = iwl_init_channel_map(priv);
-	if (ret) {
-		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-		goto err;
-	}
-
-	ret = iwlcore_init_geos(priv);
-	if (ret) {
-		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-		goto err_free_channel_map;
-	}
-	iwlcore_init_hw_rates(priv, priv->ieee_rates);
-
-	return 0;
-
-err_free_channel_map:
-	iwl_free_channel_map(priv);
-err:
-	return ret;
-}
-EXPORT_SYMBOL(iwl_init_drv);
-
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
 	int ret = 0;
@@ -1583,15 +1652,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 }
 EXPORT_SYMBOL(iwl_set_tx_power);
 
-void iwl_uninit_drv(struct iwl_priv *priv)
-{
-	iwl_calib_free_results(priv);
-	iwlcore_free_geos(priv);
-	iwl_free_channel_map(priv);
-	kfree(priv->scan);
-}
-EXPORT_SYMBOL(iwl_uninit_drv);
-
 #define ICT_COUNT (PAGE_SIZE/sizeof(u32))
 
 /* Free dram table */
@@ -1915,9 +1975,9 @@ EXPORT_SYMBOL(iwl_isr_legacy);
 int iwl_send_bt_config(struct iwl_priv *priv)
 {
 	struct iwl_bt_cmd bt_cmd = {
-		.flags = 3,
-		.lead_time = 0xAA,
-		.max_kill = 1,
+		.flags = BT_COEX_MODE_4W,
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
 		.kill_ack_mask = 0,
 		.kill_cts_mask = 0,
 	};
@@ -1927,16 +1987,21 @@ int iwl_send_bt_config(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_send_bt_config);
 
-int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
+int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
 {
-	u32 stat_flags = 0;
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_STATISTICS_CMD,
-		.flags = flags,
-		.len = sizeof(stat_flags),
-		.data = (u8 *) &stat_flags,
+	struct iwl_statistics_cmd statistics_cmd = {
+		.configuration_flags =
+			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
 	};
-	return iwl_send_cmd(priv, &cmd);
+
+	if (flags & CMD_ASYNC)
+		return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+					       sizeof(struct iwl_statistics_cmd),
+					       &statistics_cmd, NULL);
+	else
+		return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+					sizeof(struct iwl_statistics_cmd),
+					&statistics_cmd);
 }
 EXPORT_SYMBOL(iwl_send_statistics_request);
 
@@ -2077,10 +2142,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 	spin_unlock_irqrestore(&priv->lock, flags);
 	priv->thermal_throttle.ct_kill_toggle = false;
 
-	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
-	case CSR_HW_REV_TYPE_1000:
-	case CSR_HW_REV_TYPE_6x00:
-	case CSR_HW_REV_TYPE_6x50:
+	if (priv->cfg->support_ct_kill_exit) {
 		adv_cmd.critical_temperature_enter =
 			cpu_to_le32(priv->hw_params.ct_kill_threshold);
 		adv_cmd.critical_temperature_exit =
@@ -2097,8 +2159,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 					"exit is %d\n",
 				       priv->hw_params.ct_kill_threshold,
 				       priv->hw_params.ct_kill_exit_threshold);
-		break;
-	default:
+	} else {
 		cmd.critical_temperature_R =
 			cpu_to_le32(priv->hw_params.ct_kill_threshold);
 
@@ -2111,7 +2172,6 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 					"succeeded, "
 					"critical temperature is %d\n",
 					priv->hw_params.ct_kill_threshold);
-		break;
 	}
 }
 EXPORT_SYMBOL(iwl_rf_kill_ct_config);
@@ -2143,7 +2203,7 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
 			   struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
 	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
 		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -2154,7 +2214,7 @@ EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
 void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
 				      struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
 			"notification for %s:\n", len,
@@ -2166,7 +2226,7 @@ EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
 void iwl_rx_reply_error(struct iwl_priv *priv,
 			struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
 	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
 		"seq 0x%04X ser 0x%08X\n",
@@ -2228,42 +2288,58 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 EXPORT_SYMBOL(iwl_mac_conf_tx);
 
 static void iwl_ht_conf(struct iwl_priv *priv,
-			    struct ieee80211_bss_conf *bss_conf)
+			struct ieee80211_bss_conf *bss_conf)
 {
-	struct ieee80211_sta_ht_cap *ht_conf;
-	struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 	struct ieee80211_sta *sta;
 
 	IWL_DEBUG_MAC80211(priv, "enter: \n");
 
-	if (!iwl_conf->is_ht)
+	if (!ht_conf->is_ht)
 		return;
 
+	ht_conf->ht_protection =
+		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	ht_conf->non_GF_STA_present =
+		!!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 
-	/*
-	 * It is totally wrong to base global information on something
-	 * that is valid only when associated, alas, this driver works
-	 * that way and I don't know how to fix it.
-	 */
+	ht_conf->single_chain_sufficient = false;
 
-	rcu_read_lock();
-	sta = ieee80211_find_sta(priv->hw, priv->bssid);
-	if (!sta) {
+	switch (priv->iw_mode) {
+	case NL80211_IFTYPE_STATION:
+		rcu_read_lock();
+		sta = ieee80211_find_sta(priv->vif, priv->bssid);
+		if (sta) {
+			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+			int maxstreams;
+
+			maxstreams = (ht_cap->mcs.tx_params &
+				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+					>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+			maxstreams += 1;
+
+			if ((ht_cap->mcs.rx_mask[1] == 0) &&
+			    (ht_cap->mcs.rx_mask[2] == 0))
+				ht_conf->single_chain_sufficient = true;
+			if (maxstreams <= 1)
+				ht_conf->single_chain_sufficient = true;
+		} else {
+			/*
+			 * If at all, this can only happen through a race
+			 * when the AP disconnects us while we're still
+			 * setting up the connection; in that case mac80211
+			 * will soon tell us about it.
+			 */
+			ht_conf->single_chain_sufficient = true;
+		}
 		rcu_read_unlock();
-		return;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		ht_conf->single_chain_sufficient = true;
+		break;
+	default:
+		break;
 	}
-	ht_conf = &sta->ht_cap;
-
-	iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
-
-	memcpy(&iwl_conf->mcs, &ht_conf->mcs, 16);
-
-	iwl_conf->ht_protection =
-		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-	iwl_conf->non_GF_STA_present =
-		!!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-	rcu_read_unlock();
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
@@ -2387,6 +2463,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			priv->timestamp = bss_conf->timestamp;
 			priv->assoc_capability = bss_conf->assoc_capability;
 
+			iwl_led_associate(priv);
+
 			/*
 			 * We have just associated, don't start scan too early
 			 * leave time for EAPOL exchange to complete.
@@ -2397,9 +2475,20 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 					IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
 			if (!iwl_is_rfkill(priv))
 				priv->cfg->ops->lib->post_associate(priv);
-		} else
+		} else {
 			priv->assoc_id = 0;
+			iwl_led_disassociate(priv);
 
+			/*
+			 * inform the ucode that there is no longer an
+			 * association and that no more packets should be
+			 * sent
+			 */
+			priv->staging_rxon.filter_flags &=
+				~RXON_FILTER_ASSOC_MSK;
+			priv->staging_rxon.assoc_id = 0;
+			iwlcore_commit_rxon(priv);
+		}
 	}
 
 	if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2414,6 +2503,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		}
 	}
 
+	if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
+	    vif->bss_conf.enable_beacon) {
+		memcpy(priv->staging_rxon.bssid_addr,
+		       bss_conf->bssid, ETH_ALEN);
+		memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+		iwlcore_config_ap(priv);
+	}
+
 	mutex_unlock(&priv->mutex);
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2570,7 +2667,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	struct iwl_priv *priv = hw->priv;
 	const struct iwl_channel_info *ch_info;
 	struct ieee80211_conf *conf = &hw->conf;
-	struct iwl_ht_info *ht_conf = &priv->current_ht_config;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 	unsigned long flags = 0;
 	int ret = 0;
 	u16 ch;
@@ -2620,21 +2717,18 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 			if (conf_is_ht40_minus(conf)) {
 				ht_conf->extension_chan_offset =
 					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-				ht_conf->supported_chan_width =
-					IWL_CHANNEL_WIDTH_40MHZ;
+				ht_conf->is_40mhz = true;
 			} else if (conf_is_ht40_plus(conf)) {
 				ht_conf->extension_chan_offset =
 					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-				ht_conf->supported_chan_width =
-					IWL_CHANNEL_WIDTH_40MHZ;
+				ht_conf->is_40mhz = true;
 			} else {
 				ht_conf->extension_chan_offset =
 					IEEE80211_HT_PARAM_CHA_SEC_NONE;
-				ht_conf->supported_chan_width =
-					IWL_CHANNEL_WIDTH_20MHZ;
+				ht_conf->is_40mhz = false;
 			}
 		} else
-			ht_conf->supported_chan_width = IWL_CHANNEL_WIDTH_20MHZ;
+			ht_conf->is_40mhz = false;
 		/* Default to no protection. Protection mode will later be set
 		 * from BSS config in iwl_ht_conf */
 		ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -2649,6 +2743,22 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 
 		iwl_set_flags_for_band(priv, conf->channel->band);
 		spin_unlock_irqrestore(&priv->lock, flags);
+		if (iwl_is_associated(priv) &&
+		    (le16_to_cpu(priv->active_rxon.channel) != ch) &&
+		    priv->cfg->ops->lib->set_channel_switch) {
+			iwl_set_rate(priv);
+			/*
+			 * at this point, staging_rxon has the
+			 * configuration for channel switch
+			 */
+			ret = priv->cfg->ops->lib->set_channel_switch(priv,
+				ch);
+			if (!ret) {
+				iwl_print_rx_config_cmd(priv);
+				goto out;
+			}
+			priv->switch_rxon.switch_in_progress = false;
+		}
  set_ch_out:
 		/* The list of supported rates and rate mask can be different
 		 * for each band; since the band may have changed, reset
@@ -2656,7 +2766,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 		iwl_set_rate(priv);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_PS) {
+	if (changed & (IEEE80211_CONF_CHANGE_PS |
+			IEEE80211_CONF_CHANGE_IDLE)) {
 		ret = iwl_power_update_mode(priv, false);
 		if (ret)
 			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
@@ -2740,7 +2851,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
 	spin_lock_irqsave(&priv->lock, flags);
-	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
+	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	iwl_reset_qos(priv);
@@ -2792,6 +2903,55 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL(iwl_mac_reset_tsf);
 
+int iwl_alloc_txq_mem(struct iwl_priv *priv)
+{
+	if (!priv->txq)
+		priv->txq = kzalloc(
+			sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
+			GFP_KERNEL);
+	if (!priv->txq) {
+		IWL_ERR(priv, "Not enough memory for txq \n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(iwl_alloc_txq_mem);
+
+void iwl_free_txq_mem(struct iwl_priv *priv)
+{
+	kfree(priv->txq);
+	priv->txq = NULL;
+}
+EXPORT_SYMBOL(iwl_free_txq_mem);
+
+int iwl_send_wimax_coex(struct iwl_priv *priv)
+{
+	struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
+
+	if (priv->cfg->support_wimax_coexist) {
+		/* UnMask wake up src at associated sleep */
+		coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+
+		/* UnMask wake up src at unassociated sleep */
+		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
+		memcpy(coex_cmd.sta_prio, cu_priorities,
+			sizeof(struct iwl_wimax_coex_event_entry) *
+			 COEX_NUM_OF_EVENTS);
+
+		/* enabling the coexistence feature */
+		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
+
+		/* enabling the priorities tables */
+		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
+	} else {
+		/* coexistence is disabled */
+		memset(&coex_cmd, 0, sizeof(coex_cmd));
+	}
+	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
+				sizeof(coex_cmd), &coex_cmd);
+}
+EXPORT_SYMBOL(iwl_send_wimax_coex);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 
 #define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -2929,15 +3089,11 @@ const char *get_ctrl_string(int cmd)
 	}
 }
 
-void iwl_clear_tx_stats(struct iwl_priv *priv)
+void iwl_clear_traffic_stats(struct iwl_priv *priv)
 {
 	memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
-
-}
-
-void iwl_clear_rx_stats(struct iwl_priv *priv)
-{
 	memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
+	priv->led_tpt = 0;
 }
 
 /*
@@ -3030,6 +3186,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
 		stats->data_cnt++;
 		stats->data_bytes += len;
 	}
+	iwl_leds_background(priv);
 }
 EXPORT_SYMBOL(iwl_update_stats);
 #endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7754538c2194..675b7df632fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,6 +63,8 @@
 #ifndef __iwl_core_h__
 #define __iwl_core_h__
 
+#include <linux/utsrelease.h>
+
 /************************
  * forward declarations *
  ************************/
@@ -70,7 +72,7 @@ struct iwl_host_cmd;
 struct iwl_cmd;
 
 
-#define IWLWIFI_VERSION "1.3.27k"
+#define IWLWIFI_VERSION UTS_RELEASE "-k"
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2009 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
@@ -89,6 +91,7 @@ struct iwl_hcmd_ops {
 	int (*rxon_assoc)(struct iwl_priv *priv);
 	int (*commit_rxon)(struct iwl_priv *priv);
 	void (*set_rxon_chain)(struct iwl_priv *priv);
+	int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
 };
 
 struct iwl_hcmd_utils_ops {
@@ -97,7 +100,8 @@ struct iwl_hcmd_utils_ops {
 	void (*gain_computation)(struct iwl_priv *priv,
 			u32 *average_noise,
 			u16 min_average_noise_antennat_i,
-			u32 min_average_noise);
+			u32 min_average_noise,
+			u8 default_chain);
 	void (*chain_noise_reset)(struct iwl_priv *priv);
 	void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
 			__le32 *tx_flags);
@@ -107,7 +111,6 @@ struct iwl_hcmd_utils_ops {
 
 struct iwl_apm_ops {
 	int (*init)(struct iwl_priv *priv);
-	int (*reset)(struct iwl_priv *priv);
 	void (*stop)(struct iwl_priv *priv);
 	void (*config)(struct iwl_priv *priv);
 	int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
@@ -166,8 +169,9 @@ struct iwl_lib_ops {
 	int (*is_valid_rtc_data_addr)(u32 addr);
 	/* 1st ucode load */
 	int (*load_ucode)(struct iwl_priv *priv);
-	void (*dump_nic_event_log)(struct iwl_priv *priv);
+	void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log);
 	void (*dump_nic_error_log)(struct iwl_priv *priv);
+	int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
 	/* power management */
 	struct iwl_apm_ops apm_ops;
 
@@ -185,18 +189,24 @@ struct iwl_lib_ops {
 	struct iwl_temp_ops temp_ops;
 };
 
+struct iwl_led_ops {
+	int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
+	int (*on)(struct iwl_priv *priv);
+	int (*off)(struct iwl_priv *priv);
+};
+
 struct iwl_ops {
 	const struct iwl_ucode_ops *ucode;
 	const struct iwl_lib_ops *lib;
 	const struct iwl_hcmd_ops *hcmd;
 	const struct iwl_hcmd_utils_ops *utils;
+	const struct iwl_led_ops *led;
 };
 
 struct iwl_mod_params {
 	int sw_crypto;		/* def: 0 = using hardware encryption */
 	int disable_hw_scan;	/* def: 0 = use h/w scan */
 	int num_of_queues;	/* def: HW dependent */
-	int num_of_ampdu_queues;/* def: HW dependent */
 	int disable_11n;	/* def: 0 = 11n capabilities enabled */
 	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
 	int antenna;  		/* def: 0 = both antennas (use diversity) */
@@ -213,7 +223,15 @@ struct iwl_mod_params {
  * @pa_type: used by 6000 series only to identify the type of Power Amplifier
  * @max_ll_items: max number of OTP blocks
  * @shadow_ram_support: shadow support for OTP memory
+ * @led_compensation: compensate the LED on/off time per HW according
+ *	to the deviation needed to achieve the desired LED frequency.
+ *	The detailed algorithm is described in iwl-led.c
  * @use_rts_for_ht: use rts/cts protection for HT traffic
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @sm_ps_mode: spatial multiplexing power save mode
+ * @support_wimax_coexist: support WiMAX/WiFi coexistence
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -245,18 +263,32 @@ struct iwl_cfg {
 	int eeprom_size;
 	u16  eeprom_ver;
 	u16  eeprom_calib_ver;
+	int num_of_queues;	/* def: HW dependent */
+	int num_of_ampdu_queues;/* def: HW dependent */
 	const struct iwl_ops *ops;
 	const struct iwl_mod_params *mod_params;
 	u8   valid_tx_ant;
 	u8   valid_rx_ant;
-	bool need_pll_cfg;
+
+	/* for iwl_apm_init() */
+	u32 pll_cfg_val;
+	bool set_l0s;
+	bool use_bsm;
+
 	bool use_isr_legacy;
 	enum iwl_pa_type pa_type;
 	const u16 max_ll_items;
 	const bool shadow_ram_support;
 	const bool ht_greenfield_support;
+	u16 led_compensation;
 	const bool broken_powersave;
 	bool use_rts_for_ht;
+	int chain_noise_num_beacons;
+	const bool supports_idle;
+	bool adv_thermal_throttle;
+	bool support_ct_kill_exit;
+	u8 sm_ps_mode;
+	const bool support_wimax_coexist;
 };
 
 /***************************
@@ -275,7 +307,7 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv);
 int iwl_full_rxon_required(struct iwl_priv *priv);
 void iwl_set_rxon_chain(struct iwl_priv *priv);
 int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
 u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 			 struct ieee80211_sta_ht_cap *sta_ht_inf);
 void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
@@ -289,10 +321,7 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
 			  unsigned int changed_flags,
 			  unsigned int *total_flags, u64 multicast);
 int iwl_hw_nic_init(struct iwl_priv *priv);
-int iwl_setup_mac(struct iwl_priv *priv);
 int iwl_set_hw_params(struct iwl_priv *priv);
-int iwl_init_drv(struct iwl_priv *priv);
-void iwl_uninit_drv(struct iwl_priv *priv);
 bool iwl_is_monitor_mode(struct iwl_priv *priv);
 void iwl_post_associate(struct iwl_priv *priv);
 void iwl_bss_info_changed(struct ieee80211_hw *hw,
@@ -311,6 +340,11 @@ void iwl_config_ap(struct iwl_priv *priv);
 int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
 			 struct ieee80211_tx_queue_stats *stats);
 void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
+int iwl_alloc_txq_mem(struct iwl_priv *priv);
+void iwl_free_txq_mem(struct iwl_priv *priv);
+void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+				__le32 *tx_flags);
+int iwl_send_wimax_coex(struct iwl_priv *priv);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
 void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -321,8 +355,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
 				u16 length, struct ieee80211_hdr *header);
 const char *get_mgmt_string(int cmd);
 const char *get_ctrl_string(int cmd);
-void iwl_clear_tx_stats(struct iwl_priv *priv);
-void iwl_clear_rx_stats(struct iwl_priv *priv);
+void iwl_clear_traffic_stats(struct iwl_priv *priv);
 void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
 		      u16 len);
 #else
@@ -358,6 +391,7 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
 		/* data */
 		stats->data_bytes += len;
 	}
+	iwl_leds_background(priv);
 }
 #endif
 /*****************************************************
@@ -393,6 +427,8 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
 			       struct iwl_rx_mem_buffer *rxb);
 void iwl_rx_statistics(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb);
+void iwl_reply_statistics(struct iwl_priv *priv,
+			  struct iwl_rx_mem_buffer *rxb);
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 
 /* TX helpers */
@@ -511,7 +547,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
 			   const void *data,
 			   void (*callback)(struct iwl_priv *priv,
 					    struct iwl_device_cmd *cmd,
-					    struct sk_buff *skb));
+					    struct iwl_rx_packet *pkt));
 
 int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
 
@@ -544,15 +580,12 @@ int iwl_pci_resume(struct pci_dev *pdev);
 /*****************************************************
 *  Error Handling Debugging
 ******************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_dump_nic_event_log(struct iwl_priv *priv);
 void iwl_dump_nic_error_log(struct iwl_priv *priv);
+void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv);
 #else
-static inline void iwl_dump_nic_event_log(struct iwl_priv *priv)
-{
-}
-
-static inline void iwl_dump_nic_error_log(struct iwl_priv *priv)
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv)
 {
 }
 #endif
@@ -571,6 +604,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
 #define STATUS_HCMD_SYNC_ACTIVE	1	/* sync host command in progress */
 #define STATUS_INT_ENABLED	2
 #define STATUS_RF_KILL_HW	3
+#define STATUS_CT_KILL		4
 #define STATUS_INIT		5
 #define STATUS_ALIVE		6
 #define STATUS_READY		7
@@ -615,6 +649,11 @@ static inline int iwl_is_rfkill(struct iwl_priv *priv)
 	return iwl_is_rfkill_hw(priv);
 }
 
+static inline int iwl_is_ctkill(struct iwl_priv *priv)
+{
+	return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
 static inline int iwl_is_ready_rf(struct iwl_priv *priv)
 {
 
@@ -626,7 +665,8 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
 
 extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
 extern int iwl_send_bt_config(struct iwl_priv *priv);
-extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags);
+extern int iwl_send_statistics_request(struct iwl_priv *priv,
+				       u8 flags, bool clear);
 extern int iwl_verify_ucode(struct iwl_priv *priv);
 extern int iwl_send_lq_cmd(struct iwl_priv *priv,
 		struct iwl_link_quality_cmd *lq, u8 flags);
@@ -636,6 +676,9 @@ extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb);
 void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 					   struct iwl_rx_mem_buffer *rxb);
+void iwl_apm_stop(struct iwl_priv *priv);
+int iwl_apm_stop_master(struct iwl_priv *priv);
+int iwl_apm_init(struct iwl_priv *priv);
 
 void iwl_setup_rxon_timing(struct iwl_priv *priv);
 static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
@@ -655,5 +698,4 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
 {
 	return priv->hw->wiphy->bands[band];
 }
-
 #endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 06437d13e73e..a7bfae01f19b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -62,11 +62,29 @@
  *****************************************************************************/
 #ifndef __iwl_csr_h__
 #define __iwl_csr_h__
-/*=== CSR (control and status registers) ===*/
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_write_direct32() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE:  Newer devices using one-time-programmable (OTP) memory
+ *        require device to be awake in order to read this memory
+ *        via CSR_EEPROM and CSR_OTP registers
+ */
 #define CSR_BASE    (0x000)
 
 #define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING     (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
 #define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
 #define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
 #define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack*/
@@ -74,43 +92,67 @@
 #define CSR_RESET               (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
 #define CSR_GP_CNTRL            (CSR_BASE+0x024)
 
+/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define CSR_INT_PERIODIC_REG	(CSR_BASE+0x005)
+
 /*
  * Hardware revision info
  * Bit fields:
  * 31-8:  Reserved
- *  7-4:  Type of device:  0x0 = 4965, 0xd = 3945
+ *  7-4:  Type of device:  see CSR_HW_REV_TYPE_xxx definitions
  *  3-2:  Revision step:  0 = A, 1 = B, 2 = C, 3 = D
- *  1-0:  "Dash" value, as in A-1, etc.
+ *  1-0:  "Dash" (-) value, as in A-1, etc.
  *
  * NOTE:  Revision step affects calculation of CCK txpower for 4965.
+ * NOTE:  See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
  */
 #define CSR_HW_REV              (CSR_BASE+0x028)
 
-/* EEPROM reads */
+/*
+ * EEPROM and OTP (one-time-programmable) memory reads
+ *
+ * NOTE:  For (newer) devices using OTP, device must be awake, initialized via
+ *        apm_ops.init() in order to read.  Older devices (3945/4965/5000)
+ *        use EEPROM and do not require this.
+ */
 #define CSR_EEPROM_REG          (CSR_BASE+0x02c)
 #define CSR_EEPROM_GP           (CSR_BASE+0x030)
 #define CSR_OTP_GP_REG   	(CSR_BASE+0x034)
+
 #define CSR_GIO_REG		(CSR_BASE+0x03C)
 #define CSR_GP_UCODE_REG	(CSR_BASE+0x048)
 #define CSR_GP_DRIVER_REG	(CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
 #define CSR_UCODE_DRV_GP1       (CSR_BASE+0x054)
 #define CSR_UCODE_DRV_GP1_SET   (CSR_BASE+0x058)
 #define CSR_UCODE_DRV_GP1_CLR   (CSR_BASE+0x05c)
 #define CSR_UCODE_DRV_GP2       (CSR_BASE+0x060)
+
 #define CSR_LED_REG             (CSR_BASE+0x094)
 #define CSR_DRAM_INT_TBL_REG	(CSR_BASE+0x0A0)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
 
-#define CSR_INT_PERIODIC_REG	(CSR_BASE+0x005)
 /* Analog phase-lock-loop configuration  */
 #define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)
+
 /*
- * Indicates hardware rev, to determine CCK backoff for txpower calculation.
+ * CSR Hardware Revision Workaround Register.  Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation.  Used for 4965 only.
+ * See also CSR_HW_REV register.
  * Bit fields:
  *  3-2:  0 = A, 1 = B, 2 = C, 3 = D step
+ *  1-0:  "Dash" (-) value, as in C-1, etc.
  */
-#define CSR_HW_REV_WA_REG	(CSR_BASE+0x22C)
-#define CSR_DBG_HPET_MEM_REG	(CSR_BASE+0x240)
+#define CSR_HW_REV_WA_REG		(CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG		(CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG	(CSR_BASE+0x250)
 
 /* Bits for CSR_HW_IF_CONFIG_REG */
 #define CSR49_HW_IF_CONFIG_REG_BIT_4965_R	(0x00000010)
@@ -125,14 +167,14 @@
 #define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A    (0x00000000)
 #define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B    (0x00001000)
 
-#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A		(0x00080000)
-#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM		(0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY		(0x00400000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE	(0x02000000)
-#define CSR_HW_IF_CONFIG_REG_PREPARE			(0x08000000)
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A	(0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM	(0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY	(0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE		  (0x08000000) /* WAKE_ME */
 
-#define CSR_INT_PERIODIC_DIS			(0x00)
-#define CSR_INT_PERIODIC_ENA			(0xFF)
+#define CSR_INT_PERIODIC_DIS			(0x00) /* disable periodic int*/
+#define CSR_INT_PERIODIC_ENA			(0xFF) /* 255*32 usec ~ 8 msec*/
 
 /* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
  * acknowledged (reset) by host writing "1" to flagged bits. */
@@ -195,8 +237,46 @@
 #define CSR_RESET_REG_FLAG_SW_RESET                  (0x00000080)
 #define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
 #define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
 
-/* GP (general purpose) CONTROL */
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ *    27:  HW_RF_KILL_SW
+ *         Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24:  POWER_SAVE_TYPE
+ *         Indicates current power-saving mode:
+ *         000 -- No power saving
+ *         001 -- MAC power-down
+ *         010 -- PHY (radio) power-down
+ *         011 -- Error
+ *   9-6:  SYS_CONFIG
+ *         Indicates current system configuration, reflecting pins on chip
+ *         as forced high/low by device circuit board.
+ *     4:  GOING_TO_SLEEP
+ *         Indicates MAC is entering a power-saving sleep power-down.
+ *         Not a good time to access device-internal resources.
+ *     3:  MAC_ACCESS_REQ
+ *         Host sets this to request and maintain MAC wakeup, to allow host
+ *         access to device-internal resources.  Host must wait for
+ *         MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ *         device registers.
+ *     2:  INIT_DONE
+ *         Host sets this to put device into fully operational D0 power mode.
+ *         Host resets this after SW_RESET to put device into low power mode.
+ *     0:  MAC_CLOCK_READY
+ *         Indicates MAC (ucode processor, etc.) is powered up and can run.
+ *         Internal resources are accessible.
+ *         NOTE:  This does not indicate that the processor is actually running.
+ *         NOTE:  This does not indicate that 4965 or 3945 has completed
+ *                init or post-power-down restore of internal SRAM memory.
+ *                Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ *                SRAM is restored and uCode is in normal operation mode.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ *         NOTE:  After device reset, this bit remains "0" until host sets
+ *                INIT_DONE
+ */
 #define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY        (0x00000001)
 #define CSR_GP_CNTRL_REG_FLAG_INIT_DONE              (0x00000004)
 #define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ         (0x00000008)
@@ -229,18 +309,58 @@
 #define CSR_EEPROM_REG_MSK_DATA		(0xFFFF0000)
 
 /* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK		(0x00000007)
-#define CSR_EEPROM_GP_BAD_SIGNATURE	(0x00000000)
+#define CSR_EEPROM_GP_VALID_MSK		(0x00000007) /* signature */
 #define CSR_EEPROM_GP_IF_OWNER_MSK	(0x00000180)
+#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP	(0x00000000)
+#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP		(0x00000001)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K		(0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K		(0x00000004)
+
+/* One-time-programmable memory general purpose reg */
 #define CSR_OTP_GP_REG_DEVICE_SELECT	(0x00010000) /* 0 - EEPROM, 1 - OTP */
 #define CSR_OTP_GP_REG_OTP_ACCESS_MODE	(0x00020000) /* 0 - absolute, 1 - relative */
 #define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK          (0x00100000) /* bit 20 */
 #define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK        (0x00200000) /* bit 21 */
 
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
+
+
 /* CSR GIO */
 #define CSR_GIO_REG_VAL_L0S_ENABLED	(0x00000002)
 
-/* UCODE DRV GP */
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ *     4:  UCODE_DISABLE
+ *         Host sets this to request permanent halt of uCode, same as
+ *         sending CARD_STATE command with "halt" bit set.
+ *     3:  CT_KILL_EXIT
+ *         Host sets this to request exit from CT_KILL state, i.e. host thinks
+ *         device temperature is low enough to continue normal operation.
+ *     2:  CMD_BLOCKED
+ *         Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ *         to release uCode to clear all Tx and command queues, enter
+ *         unassociated mode, and power down.
+ *         NOTE:  Some devices also use HBUS_TARG_MBX_C register for this bit.
+ *     1:  SW_BIT_RFKILL
+ *         Host sets this when issuing CARD_STATE command to request
+ *         device sleep.
+ *     0:  MAC_SLEEP
+ *         uCode sets this when preparing a power-saving power-down.
+ *         uCode resets this when power-up is complete and SRAM is sane.
+ *         NOTE:  3945/4965 saves internal SRAM data to host when powering down,
+ *                and must restore this data after powering back up.
+ *                MAC_SLEEP is the best indication that restore is complete.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ */
 #define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP             (0x00000001)
 #define CSR_UCODE_SW_BIT_RFKILL                     (0x00000002)
 #define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED           (0x00000004)
@@ -253,7 +373,7 @@
 #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA	    (0x00000002)
 
 
-/* GI Chicken Bits */
+/* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
 #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
 
@@ -273,8 +393,23 @@
 #define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
 
-/*=== HBUS (Host-side Bus) ===*/
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
 #define HBUS_BASE	(0x400)
+
 /*
  * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
  * structures, error log, event log, verifying uCode load).
@@ -289,6 +424,10 @@
 #define HBUS_TARG_MEM_WDAT      (HBUS_BASE+0x018)
 #define HBUS_TARG_MEM_RDAT      (HBUS_BASE+0x01c)
 
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C         (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED         (0x00000004)
+
 /*
  * Registers for accessing device's internal peripheral registers
  * (e.g. SCD, BSM, etc.).  First write to address register,
@@ -303,16 +442,12 @@
 #define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
 
 /*
- * Per-Tx-queue write pointer (index, really!) (3945 and 4965).
+ * Per-Tx-queue write pointer (index, really!)
  * Indicates index to next TFD that driver will fill (1 past latest filled).
  * Bit usage:
  *  0-7:  queue write index
  * 11-8:  queue selector
  */
 #define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
-#define HBUS_TARG_MBX_C         (HBUS_BASE+0x030)
-
-#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED         (0x00000004)
-
 
 #endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index cbc62904655d..d61293ab67c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -84,9 +84,7 @@ struct iwl_debugfs {
 		struct dentry *file_interrupt;
 		struct dentry *file_qos;
 		struct dentry *file_thermal_throttling;
-#ifdef CONFIG_IWLWIFI_LEDS
 		struct dentry *file_led;
-#endif
 		struct dentry *file_disable_ht40;
 		struct dentry *file_sleep_level_override;
 		struct dentry *file_current_sleep_command;
@@ -108,6 +106,9 @@ struct iwl_debugfs {
 		struct dentry *file_sensitivity;
 		struct dentry *file_chain_noise;
 		struct dentry *file_tx_power;
+		struct dentry *file_power_save_status;
+		struct dentry *file_clear_ucode_statistics;
+		struct dentry *file_clear_traffic_statistics;
 	} dbgfs_debug_files;
 	u32 sram_offset;
 	u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a198bcf61022..21e0f6699daf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -47,9 +47,9 @@
 		goto err; 						\
 } while (0)
 
-#define DEBUGFS_ADD_FILE(name, parent) do {                             \
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
 	dbgfs->dbgfs_##parent##_files.file_##name =                     \
-	debugfs_create_file(#name, S_IWUSR | S_IRUSR,                   \
+	debugfs_create_file(#name, mode,                                \
 				dbgfs->dir_##parent, priv,              \
 				&iwl_dbgfs_##name##_ops);               \
 	if (!(dbgfs->dbgfs_##parent##_files.file_##name))               \
@@ -131,21 +131,22 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
 
 	int cnt;
 	ssize_t ret;
-	const size_t bufsz = 100 + sizeof(char) * 24 * (MANAGEMENT_MAX + CONTROL_MAX);
+	const size_t bufsz = 100 +
+		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
 	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
 		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%s\t\t: %u\n",
+				 "\t%25s\t\t: %u\n",
 				 get_mgmt_string(cnt),
 				 priv->tx_stats.mgmt[cnt]);
 	}
 	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
 	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
 		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%s\t\t: %u\n",
+				 "\t%25s\t\t: %u\n",
 				 get_ctrl_string(cnt),
 				 priv->tx_stats.ctrl[cnt]);
 	}
@@ -159,7 +160,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
 	return ret;
 }
 
-static ssize_t iwl_dbgfs_tx_statistics_write(struct file *file,
+static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
 					const char __user *user_buf,
 					size_t count, loff_t *ppos)
 {
@@ -174,8 +175,7 @@ static ssize_t iwl_dbgfs_tx_statistics_write(struct file *file,
 		return -EFAULT;
 	if (sscanf(buf, "%x", &clear_flag) != 1)
 		return -EFAULT;
-	if (clear_flag == 1)
-		iwl_clear_tx_stats(priv);
+	iwl_clear_traffic_stats(priv);
 
 	return count;
 }
@@ -190,7 +190,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
 	int cnt;
 	ssize_t ret;
 	const size_t bufsz = 100 +
-		sizeof(char) * 24 * (MANAGEMENT_MAX + CONTROL_MAX);
+		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -198,14 +198,14 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
 	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
 	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
 		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%s\t\t: %u\n",
+				 "\t%25s\t\t: %u\n",
 				 get_mgmt_string(cnt),
 				 priv->rx_stats.mgmt[cnt]);
 	}
 	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
 	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
 		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%s\t\t: %u\n",
+				 "\t%25s\t\t: %u\n",
 				 get_ctrl_string(cnt),
 				 priv->rx_stats.ctrl[cnt]);
 	}
@@ -220,26 +220,6 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
 	return ret;
 }
 
-static ssize_t iwl_dbgfs_rx_statistics_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	u32 clear_flag;
-	char buf[8];
-	int buf_size;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%x", &clear_flag) != 1)
-		return -EFAULT;
-	if (clear_flag == 1)
-		iwl_clear_rx_stats(priv);
-	return count;
-}
-
 #define BYTE1_MASK 0x000000ff;
 #define BYTE2_MASK 0x0000ffff;
 #define BYTE3_MASK 0x00ffffff;
@@ -248,13 +228,29 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 					size_t count, loff_t *ppos)
 {
 	u32 val;
-	char buf[1024];
+	char *buf;
 	ssize_t ret;
 	int i;
 	int pos = 0;
 	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
-	const size_t bufsz = sizeof(buf);
-
+	size_t bufsz;
+
+	/* default is to dump the entire data segment */
+	if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) {
+		priv->dbgfs->sram_offset = 0x800000;
+		if (priv->ucode_type == UCODE_INIT)
+			priv->dbgfs->sram_len = priv->ucode_init_data.len;
+		else
+			priv->dbgfs->sram_len = priv->ucode_data.len;
+	}
+	bufsz =  30 + priv->dbgfs->sram_len * sizeof(char) * 10;
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+			priv->dbgfs->sram_len);
+	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+			priv->dbgfs->sram_offset);
 	for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
 		val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
 					priv->dbgfs->sram_len - i);
@@ -271,11 +267,14 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 				break;
 			}
 		}
+		if (!(i % 16))
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
 		pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
 	}
 	pos += scnprintf(buf + pos, bufsz - pos, "\n");
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
 	return ret;
 }
 
@@ -335,8 +334,6 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 			pos += scnprintf(buf + pos, bufsz - pos,
 					"flags: 0x%x\n",
 					station->sta.station_flags_msk);
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"ps_status: %u\n", station->ps_status);
 			pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
 			pos += scnprintf(buf + pos, bufsz - pos,
 					"seq_num\t\ttxq_id");
@@ -383,6 +380,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
 	int pos = 0, ofs = 0, buf_size = 0;
 	const u8 *ptr;
 	char *buf;
+	u16 eeprom_ver;
 	size_t eeprom_len = priv->cfg->eeprom_size;
 	buf_size = 4 * eeprom_len + 256;
 
@@ -403,9 +401,11 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
 		IWL_ERR(priv, "Can not allocate Buffer\n");
 		return -ENOMEM;
 	}
-	pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
+	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+	pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
+			"version: 0x%x\n",
 			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
-			? "OTP" : "EEPROM");
+			 ? "OTP" : "EEPROM", eeprom_ver);
 	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
 		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
 		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -436,7 +436,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
 	if (sscanf(buf, "%d", &event_log_flag) != 1)
 		return -EFAULT;
 	if (event_log_flag == 1)
-		priv->cfg->ops->lib->dump_nic_event_log(priv);
+		priv->cfg->ops->lib->dump_nic_event_log(priv, true);
 
 	return count;
 }
@@ -532,6 +532,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
 		test_bit(STATUS_INT_ENABLED, &priv->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
 		test_bit(STATUS_RF_KILL_HW, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
+		test_bit(STATUS_CT_KILL, &priv->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
 		test_bit(STATUS_INIT, &priv->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
@@ -672,7 +674,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
 	return ret;
 }
 
-#ifdef CONFIG_IWLWIFI_LEDS
 static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
 {
@@ -697,7 +698,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	return ret;
 }
-#endif
 
 static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
 				char __user *user_buf,
@@ -798,15 +798,20 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
 	 * valid here. However, let's not confuse them and present
 	 * IWL_POWER_INDEX_1 as "1", not "0".
 	 */
-	if (value > 0)
+	if (value == 0)
+		return -EINVAL;
+	else if (value > 0)
 		value -= 1;
 
 	if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
 		return -EINVAL;
 
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
 	priv->power_data.debug_sleep_level_override = value;
 
-	iwl_power_update_mode(priv, false);
+	iwl_power_update_mode(priv, true);
 
 	return count;
 }
@@ -861,9 +866,7 @@ DEBUGFS_READ_FILE_OPS(channels);
 DEBUGFS_READ_FILE_OPS(status);
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(qos);
-#ifdef CONFIG_IWLWIFI_LEDS
 DEBUGFS_READ_FILE_OPS(led);
-#endif
 DEBUGFS_READ_FILE_OPS(thermal_throttling);
 DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -881,10 +884,14 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	char *buf;
 	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
-		(IWL_MAX_NUM_QUEUES * 32 * 8) + 400;
+		(priv->cfg->num_of_queues * 32 * 8) + 400;
 	const u8 *ptr;
 	ssize_t ret;
 
+	if (!priv->txq) {
+		IWL_ERR(priv, "txq not ready\n");
+		return -EAGAIN;
+	}
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf) {
 		IWL_ERR(priv, "Can not allocate buffer\n");
@@ -976,8 +983,12 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
 	int pos = 0;
 	int cnt;
 	int ret;
-	const size_t bufsz = sizeof(char) * 60 * IWL_MAX_NUM_QUEUES;
+	const size_t bufsz = sizeof(char) * 64 * priv->cfg->num_of_queues;
 
+	if (!priv->txq) {
+		IWL_ERR(priv, "txq not ready\n");
+		return -EAGAIN;
+	}
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -1028,10 +1039,6 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-#define UCODE_STATISTICS_CLEAR_MSK		(0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK		(0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK	(0x1 << 2)
-
 static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
 				     int bufsz)
 {
@@ -1068,17 +1075,17 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
 		sizeof(struct statistics_rx_non_phy) * 20 +
 		sizeof(struct statistics_rx_ht_phy) * 20 + 400;
 	ssize_t ret;
-	struct statistics_rx_phy *ofdm;
-	struct statistics_rx_phy *cck;
-	struct statistics_rx_non_phy *general;
-	struct statistics_rx_ht_phy *ht;
+	struct statistics_rx_phy *ofdm, *accum_ofdm;
+	struct statistics_rx_phy *cck, *accum_cck;
+	struct statistics_rx_non_phy *general, *accum_general;
+	struct statistics_rx_ht_phy *ht, *accum_ht;
 
 	if (!iwl_is_alive(priv))
 		return -EAGAIN;
 
 	/* make request to uCode to retrieve statistics information */
 	mutex_lock(&priv->mutex);
-	ret = iwl_send_statistics_request(priv, 0);
+	ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
 	if (ret) {
@@ -1100,155 +1107,268 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
 	cck = &priv->statistics.rx.cck;
 	general = &priv->statistics.rx.general;
 	ht = &priv->statistics.rx.ofdm_ht;
+	accum_ofdm = &priv->accum_statistics.rx.ofdm;
+	accum_cck = &priv->accum_statistics.rx.cck;
+	accum_general = &priv->accum_statistics.rx.general;
+	accum_ht = &priv->accum_statistics.rx.ofdm_ht;
 	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n",
-			 le32_to_cpu(ofdm->ina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n",
-			 le32_to_cpu(ofdm->fina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n",
-			 le32_to_cpu(ofdm->plcp_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n",
-			 le32_to_cpu(ofdm->crc32_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n",
-			 le32_to_cpu(ofdm->overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n",
-			 le32_to_cpu(ofdm->early_overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n",
-			 le32_to_cpu(ofdm->crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n",
-			 le32_to_cpu(ofdm->false_alarm_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n",
-			 le32_to_cpu(ofdm->fina_sync_err_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n",
-			 le32_to_cpu(ofdm->sfd_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n",
-			 le32_to_cpu(ofdm->fina_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n",
-			 le32_to_cpu(ofdm->unresponded_rts));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"rxe_frame_limit_overrun: %u\n",
-			le32_to_cpu(ofdm->rxe_frame_limit_overrun));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n",
-			 le32_to_cpu(ofdm->sent_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n",
-			 le32_to_cpu(ofdm->sent_cts_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n",
-			 le32_to_cpu(ofdm->sent_ba_rsp_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n",
-			 le32_to_cpu(ofdm->dsp_self_kill));
-	pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n",
-			 le32_to_cpu(ofdm->mh_format_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n",
-			 le32_to_cpu(ofdm->re_acq_main_rssi_sum));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "overrun_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->overrun_err),
+			 accum_ofdm->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "early_overrun_err:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->early_overrun_err),
+			 accum_ofdm->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->crc32_good),
+			 accum_ofdm->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "false_alarm_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->false_alarm_cnt),
+			 accum_ofdm->false_alarm_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->fina_sync_err_cnt),
+			 accum_ofdm->fina_sync_err_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sfd_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sfd_timeout),
+			 accum_ofdm->sfd_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->fina_timeout),
+			 accum_ofdm->fina_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "unresponded_rts:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->unresponded_rts),
+			 accum_ofdm->unresponded_rts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+			 accum_ofdm->rxe_frame_limit_overrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sent_ack_cnt),
+			 accum_ofdm->sent_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sent_cts_cnt),
+			 accum_ofdm->sent_cts_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+			 accum_ofdm->sent_ba_rsp_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dsp_self_kill:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->dsp_self_kill),
+			 accum_ofdm->dsp_self_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "mh_format_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->mh_format_err),
+			 accum_ofdm->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+			 accum_ofdm->re_acq_main_rssi_sum);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n",
-			 le32_to_cpu(cck->ina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n",
-			 le32_to_cpu(cck->fina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n",
-			 le32_to_cpu(cck->plcp_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n",
-			 le32_to_cpu(cck->crc32_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n",
-			 le32_to_cpu(cck->overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n",
-			 le32_to_cpu(cck->early_overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n",
-			 le32_to_cpu(cck->crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n",
-			 le32_to_cpu(cck->false_alarm_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n",
-			 le32_to_cpu(cck->fina_sync_err_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n",
-			 le32_to_cpu(cck->sfd_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n",
-			 le32_to_cpu(cck->fina_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n",
-			 le32_to_cpu(cck->unresponded_rts));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"rxe_frame_limit_overrun: %u\n",
-			le32_to_cpu(cck->rxe_frame_limit_overrun));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n",
-			 le32_to_cpu(cck->sent_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n",
-			 le32_to_cpu(cck->sent_cts_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n",
-			 le32_to_cpu(cck->sent_ba_rsp_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n",
-			 le32_to_cpu(cck->dsp_self_kill));
-	pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n",
-			 le32_to_cpu(cck->mh_format_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n",
-			 le32_to_cpu(cck->re_acq_main_rssi_sum));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "overrun_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->overrun_err),
+			 accum_cck->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "early_overrun_err:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->early_overrun_err),
+			 accum_cck->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "false_alarm_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->false_alarm_cnt),
+			 accum_cck->false_alarm_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->fina_sync_err_cnt),
+			 accum_cck->fina_sync_err_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sfd_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sfd_timeout),
+			 accum_cck->sfd_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->fina_timeout),
+			 accum_cck->fina_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "unresponded_rts:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->unresponded_rts),
+			 accum_cck->unresponded_rts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->rxe_frame_limit_overrun),
+			 accum_cck->rxe_frame_limit_overrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sent_ack_cnt),
+			 accum_cck->sent_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sent_cts_cnt),
+			 accum_cck->sent_cts_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sent_ba_rsp_cnt),
+			 accum_cck->sent_ba_rsp_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dsp_self_kill:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->dsp_self_kill),
+			 accum_cck->dsp_self_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "mh_format_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->mh_format_err),
+			 accum_cck->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->re_acq_main_rssi_sum),
+			 accum_cck->re_acq_main_rssi_sum);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts: %u\n",
-			 le32_to_cpu(general->bogus_cts));
-	pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack: %u\n",
-			 le32_to_cpu(general->bogus_ack));
-	pos += scnprintf(buf + pos, bufsz - pos, "non_bssid_frames: %u\n",
-			 le32_to_cpu(general->non_bssid_frames));
-	pos += scnprintf(buf + pos, bufsz - pos, "filtered_frames: %u\n",
-			 le32_to_cpu(general->filtered_frames));
-	pos += scnprintf(buf + pos, bufsz - pos, "non_channel_beacons: %u\n",
-			 le32_to_cpu(general->non_channel_beacons));
-	pos += scnprintf(buf + pos, bufsz - pos, "channel_beacons: %u\n",
-			 le32_to_cpu(general->channel_beacons));
-	pos += scnprintf(buf + pos, bufsz - pos, "num_missed_bcon: %u\n",
-			 le32_to_cpu(general->num_missed_bcon));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"adc_rx_saturation_time: %u\n",
-			le32_to_cpu(general->adc_rx_saturation_time));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"ina_detection_search_time: %u\n",
-			le32_to_cpu(general->ina_detection_search_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_a: %u\n",
-			 le32_to_cpu(general->beacon_silence_rssi_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_b: %u\n",
-			 le32_to_cpu(general->beacon_silence_rssi_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_c: %u\n",
-			 le32_to_cpu(general->beacon_silence_rssi_c));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"interference_data_flag: %u\n",
-			le32_to_cpu(general->interference_data_flag));
-	pos += scnprintf(buf + pos, bufsz - pos, "channel_load: %u\n",
-			 le32_to_cpu(general->channel_load));
-	pos += scnprintf(buf + pos, bufsz - pos, "dsp_false_alarms: %u\n",
-			 le32_to_cpu(general->dsp_false_alarms));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_a: %u\n",
-			 le32_to_cpu(general->beacon_rssi_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_b: %u\n",
-			 le32_to_cpu(general->beacon_rssi_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_c: %u\n",
-			 le32_to_cpu(general->beacon_rssi_c));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_a: %u\n",
-			 le32_to_cpu(general->beacon_energy_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_b: %u\n",
-			 le32_to_cpu(general->beacon_energy_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_c: %u\n",
-			 le32_to_cpu(general->beacon_energy_c));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->bogus_cts),
+			 accum_general->bogus_cts);
+	pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->bogus_ack),
+			 accum_general->bogus_ack);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "non_bssid_frames:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->non_bssid_frames),
+			 accum_general->non_bssid_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "filtered_frames:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->filtered_frames),
+			 accum_general->filtered_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "non_channel_beacons:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->non_channel_beacons),
+			 accum_general->non_channel_beacons);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "channel_beacons:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->channel_beacons),
+			 accum_general->channel_beacons);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "num_missed_bcon:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->num_missed_bcon),
+			 accum_general->num_missed_bcon);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"adc_rx_saturation_time:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->adc_rx_saturation_time),
+			 accum_general->adc_rx_saturation_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"ina_detect_search_tm:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->ina_detection_search_time),
+			 accum_general->ina_detection_search_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_silence_rssi_a),
+			 accum_general->beacon_silence_rssi_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_silence_rssi_b),
+			 accum_general->beacon_silence_rssi_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_silence_rssi_c),
+			 accum_general->beacon_silence_rssi_c);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"interference_data_flag:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->interference_data_flag),
+			 accum_general->interference_data_flag);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "channel_load:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->channel_load),
+			 accum_general->channel_load);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dsp_false_alarms:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->dsp_false_alarms),
+			 accum_general->dsp_false_alarms);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_rssi_a:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_rssi_a),
+			 accum_general->beacon_rssi_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_rssi_b:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_rssi_b),
+			 accum_general->beacon_rssi_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_rssi_c:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_rssi_c),
+			 accum_general->beacon_rssi_c);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_energy_a:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_energy_a),
+			 accum_general->beacon_energy_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_energy_b:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_energy_b),
+			 accum_general->beacon_energy_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_energy_c:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_energy_c),
+			 accum_general->beacon_energy_c);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n",
-			 le32_to_cpu(ht->plcp_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n",
-			 le32_to_cpu(ht->overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n",
-			 le32_to_cpu(ht->early_overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n",
-			 le32_to_cpu(ht->crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n",
-			 le32_to_cpu(ht->crc32_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n",
-			 le32_to_cpu(ht->mh_format_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg_crc32_good: %u\n",
-			 le32_to_cpu(ht->agg_crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg_mpdu_cnt: %u\n",
-			 le32_to_cpu(ht->agg_mpdu_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt: %u\n",
-			 le32_to_cpu(ht->agg_cnt));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "overrun_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "early_overrun_err:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->early_overrun_err),
+			 accum_ht->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "mh_format_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->mh_format_err),
+			 accum_ht->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg_crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->agg_crc32_good),
+			 accum_ht->agg_crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->agg_mpdu_cnt),
+			 accum_ht->agg_mpdu_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
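
The rx/tx statistics hunks in this patch all follow one formatting pattern: each row prints the "current" counter taken from the little-endian uCode statistics copy next to the driver-maintained accumulator. A minimal sketch of that pattern, assuming only mainline scnprintf()/le32_to_cpu() behavior; the helper and its parameter names are illustrative and not part of the patch:

/* Hypothetical helper, not part of the patch: one two-column row. */
static int iwl_dbgfs_stat_row(char *buf, int pos, int bufsz,
			      const char *name, __le32 current_le,
			      u32 accumulated)
{
	/* left column: current value from uCode (LE on the wire),
	 * right column: value accumulated by the driver */
	return pos + scnprintf(buf + pos, bufsz - pos,
			       "%s:\t\t%u\t\t\t%u\n", name,
			       le32_to_cpu(current_le), accumulated);
}
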
@@ -1264,14 +1384,14 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
 	char *buf;
 	int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
 	ssize_t ret;
-	struct statistics_tx *tx;
+	struct statistics_tx *tx, *accum_tx;
 
 	if (!iwl_is_alive(priv))
 		return -EAGAIN;
 
 	/* make request to uCode to retrieve statistics information */
 	mutex_lock(&priv->mutex);
-	ret = iwl_send_statistics_request(priv, 0);
+	ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
 	if (ret) {
@@ -1290,62 +1410,107 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
 	 * might not reflect the current uCode activity
 	 */
 	tx = &priv->statistics.tx;
+	accum_tx = &priv->accum_statistics.tx;
 	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "preamble: %u\n",
-			 le32_to_cpu(tx->preamble_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "rx_detected_cnt: %u\n",
-			 le32_to_cpu(tx->rx_detected_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_defer_cnt: %u\n",
-			 le32_to_cpu(tx->bt_prio_defer_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_kill_cnt: %u\n",
-			 le32_to_cpu(tx->bt_prio_kill_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "few_bytes_cnt: %u\n",
-			 le32_to_cpu(tx->few_bytes_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout: %u\n",
-			 le32_to_cpu(tx->cts_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "ack_timeout: %u\n",
-			 le32_to_cpu(tx->ack_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "expected_ack_cnt: %u\n",
-			 le32_to_cpu(tx->expected_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "actual_ack_cnt: %u\n",
-			 le32_to_cpu(tx->actual_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "dump_msdu_cnt: %u\n",
-			 le32_to_cpu(tx->dump_msdu_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"burst_abort_next_frame_mismatch_cnt: %u\n",
-			le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"burst_abort_missing_next_frame_cnt: %u\n",
-			le32_to_cpu(tx->burst_abort_missing_next_frame_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout_collision: %u\n",
-			 le32_to_cpu(tx->cts_timeout_collision));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"ack_or_ba_timeout_collision: %u\n",
-			le32_to_cpu(tx->ack_or_ba_timeout_collision));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg ba_timeout: %u\n",
-			 le32_to_cpu(tx->agg.ba_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"agg ba_reschedule_frames: %u\n",
-			le32_to_cpu(tx->agg.ba_reschedule_frames));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"agg scd_query_agg_frame_cnt: %u\n",
-			le32_to_cpu(tx->agg.scd_query_agg_frame_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_no_agg: %u\n",
-			 le32_to_cpu(tx->agg.scd_query_no_agg));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_agg: %u\n",
-			 le32_to_cpu(tx->agg.scd_query_agg));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"agg scd_query_mismatch: %u\n",
-			le32_to_cpu(tx->agg.scd_query_mismatch));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg frame_not_ready: %u\n",
-			 le32_to_cpu(tx->agg.frame_not_ready));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg underrun: %u\n",
-			 le32_to_cpu(tx->agg.underrun));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg bt_prio_kill: %u\n",
-			 le32_to_cpu(tx->agg.bt_prio_kill));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg rx_ba_rsp_cnt: %u\n",
-			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->preamble_cnt),
+			 accum_tx->preamble_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "rx_detected_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->rx_detected_cnt),
+			 accum_tx->rx_detected_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->bt_prio_defer_cnt),
+			 accum_tx->bt_prio_defer_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->bt_prio_kill_cnt),
+			 accum_tx->bt_prio_kill_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->few_bytes_cnt),
+			 accum_tx->few_bytes_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "cts_timeout:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ack_timeout:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->ack_timeout),
+			 accum_tx->ack_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "expected_ack_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->expected_ack_cnt),
+			 accum_tx->expected_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->actual_ack_cnt),
+			 accum_tx->actual_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->dump_msdu_cnt),
+			 accum_tx->dump_msdu_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "abort_nxt_frame_mismatch:"
+			 "\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+			 accum_tx->burst_abort_next_frame_mismatch_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "abort_missing_nxt_frame:"
+			 "\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+			 accum_tx->burst_abort_missing_next_frame_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "cts_timeout_collision:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->cts_timeout_collision),
+			 accum_tx->cts_timeout_collision);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"ack_ba_timeout_collision:\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
+			 accum_tx->ack_or_ba_timeout_collision);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.ba_timeout),
+			 accum_tx->agg.ba_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"agg ba_resched_frames:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.ba_reschedule_frames),
+			 accum_tx->agg.ba_reschedule_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"agg scd_query_agg_frame:\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+			 accum_tx->agg.scd_query_agg_frame_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_no_agg),
+			 accum_tx->agg.scd_query_no_agg);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg scd_query_agg:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_agg),
+			 accum_tx->agg.scd_query_agg);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_mismatch),
+			 accum_tx->agg.scd_query_mismatch);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg frame_not_ready:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.frame_not_ready),
+			 accum_tx->agg.frame_not_ready);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg underrun:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.underrun),
+			 accum_tx->agg.underrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.bt_prio_kill),
+			 accum_tx->agg.bt_prio_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+			 accum_tx->agg.rx_ba_rsp_cnt);
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
@@ -1361,16 +1526,16 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
 	char *buf;
 	int bufsz = sizeof(struct statistics_general) * 4 + 250;
 	ssize_t ret;
-	struct statistics_general *general;
-	struct statistics_dbg *dbg;
-	struct statistics_div *div;
+	struct statistics_general *general, *accum_general;
+	struct statistics_dbg *dbg, *accum_dbg;
+	struct statistics_div *div, *accum_div;
 
 	if (!iwl_is_alive(priv))
 		return -EAGAIN;
 
 	/* make request to uCode to retrieve statistics information */
 	mutex_lock(&priv->mutex);
-	ret = iwl_send_statistics_request(priv, 0);
+	ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
 	if (ret) {
@@ -1391,34 +1556,53 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
 	general = &priv->statistics.general;
 	dbg = &priv->statistics.general.dbg;
 	div = &priv->statistics.general.div;
+	accum_general = &priv->accum_statistics.general;
+	accum_dbg = &priv->accum_statistics.general.dbg;
+	accum_div = &priv->accum_statistics.general.div;
 	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "temperature: %u\n",
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
 			 le32_to_cpu(general->temperature));
-	pos += scnprintf(buf + pos, bufsz - pos, "temperature_m: %u\n",
+	pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
 			 le32_to_cpu(general->temperature_m));
-	pos += scnprintf(buf + pos, bufsz - pos, "burst_check: %u\n",
-			 le32_to_cpu(dbg->burst_check));
-	pos += scnprintf(buf + pos, bufsz - pos, "burst_count: %u\n",
-			 le32_to_cpu(dbg->burst_count));
-	pos += scnprintf(buf + pos, bufsz - pos, "sleep_time: %u\n",
-			 le32_to_cpu(general->sleep_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "slots_out: %u\n",
-			 le32_to_cpu(general->slots_out));
-	pos += scnprintf(buf + pos, bufsz - pos, "slots_idle: %u\n",
-			 le32_to_cpu(general->slots_idle));
-	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp: %u\n",
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "burst_check:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(dbg->burst_check),
+			 accum_dbg->burst_check);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "burst_count:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(dbg->burst_count),
+			 accum_dbg->burst_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sleep_time:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->sleep_time),
+			 accum_general->sleep_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "slots_out:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->slots_out),
+			 accum_general->slots_out);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "slots_idle:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->slots_idle),
+			 accum_general->slots_idle);
+	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
 			 le32_to_cpu(general->ttl_timestamp));
-	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a: %u\n",
-			 le32_to_cpu(div->tx_on_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b: %u\n",
-			 le32_to_cpu(div->tx_on_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "exec_time: %u\n",
-			 le32_to_cpu(div->exec_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "probe_time: %u\n",
-			 le32_to_cpu(div->probe_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "rx_enable_counter: %u\n",
-			 le32_to_cpu(general->rx_enable_counter));
+	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
+	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "exec_time:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->exec_time), accum_div->exec_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "probe_time:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->probe_time), accum_div->probe_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "rx_enable_counter:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->rx_enable_counter),
+			 accum_general->rx_enable_counter);
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
 	return ret;
@@ -1579,7 +1763,7 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
 	else {
 		/* make request to uCode to retrieve statistics information */
 		mutex_lock(&priv->mutex);
-		ret = iwl_send_statistics_request(priv, 0);
+		ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
 		mutex_unlock(&priv->mutex);
 
 		if (ret) {
@@ -1614,8 +1798,55 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics);
+static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
+						    char __user *user_buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	char buf[60];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	u32 pwrsave_status;
+
+	pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+			CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+	pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+		(pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+		(pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+		(pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+		"error");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int clear;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &clear) != 1)
+		return -EFAULT;
+
+	/* ask uCode to clear its statistics counters (sync request) */
+	mutex_lock(&priv->mutex);
+	iwl_send_statistics_request(priv, CMD_SYNC, true);
+	mutex_unlock(&priv->mutex);
+
+	return count;
+}
+
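
For context, the new write-only clear_ucode_statistics entry is driven from user space by writing any integer to it; the parsed value itself is ignored and the handler simply issues a synchronous statistics request with the clear flag set. A hedged user-space sketch; the debugfs path below depends on the wiphy name, driver directory name and mount point, so treat it purely as an example:

/* Hypothetical user-space usage of the new debugfs entry. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/"
			   "iwlagn/debug/clear_ucode_statistics";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
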
+DEBUGFS_READ_FILE_OPS(rx_statistics);
+DEBUGFS_READ_FILE_OPS(tx_statistics);
 DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
 DEBUGFS_READ_FILE_OPS(rx_queue);
 DEBUGFS_READ_FILE_OPS(tx_queue);
@@ -1625,6 +1856,9 @@ DEBUGFS_READ_FILE_OPS(ucode_general_stats);
 DEBUGFS_READ_FILE_OPS(sensitivity);
 DEBUGFS_READ_FILE_OPS(chain_noise);
 DEBUGFS_READ_FILE_OPS(tx_power);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
 
 /*
  * Create the debugfs files and directories
@@ -1653,33 +1887,34 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
 	DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
 	DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
 	DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
-	DEBUGFS_ADD_FILE(nvm, data);
-	DEBUGFS_ADD_FILE(sram, data);
-	DEBUGFS_ADD_FILE(log_event, data);
-	DEBUGFS_ADD_FILE(stations, data);
-	DEBUGFS_ADD_FILE(channels, data);
-	DEBUGFS_ADD_FILE(status, data);
-	DEBUGFS_ADD_FILE(interrupt, data);
-	DEBUGFS_ADD_FILE(qos, data);
-#ifdef CONFIG_IWLWIFI_LEDS
-	DEBUGFS_ADD_FILE(led, data);
-#endif
-	DEBUGFS_ADD_FILE(sleep_level_override, data);
-	DEBUGFS_ADD_FILE(current_sleep_command, data);
-	DEBUGFS_ADD_FILE(thermal_throttling, data);
-	DEBUGFS_ADD_FILE(disable_ht40, data);
-	DEBUGFS_ADD_FILE(rx_statistics, debug);
-	DEBUGFS_ADD_FILE(tx_statistics, debug);
-	DEBUGFS_ADD_FILE(traffic_log, debug);
-	DEBUGFS_ADD_FILE(rx_queue, debug);
-	DEBUGFS_ADD_FILE(tx_queue, debug);
-	DEBUGFS_ADD_FILE(tx_power, debug);
+	DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(log_event, data, S_IWUSR);
+	DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(status, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(qos, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(led, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR);
+	DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
 	if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
-		DEBUGFS_ADD_FILE(ucode_rx_stats, debug);
-		DEBUGFS_ADD_FILE(ucode_tx_stats, debug);
-		DEBUGFS_ADD_FILE(ucode_general_stats, debug);
-		DEBUGFS_ADD_FILE(sensitivity, debug);
-		DEBUGFS_ADD_FILE(chain_noise, debug);
+		DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
+		DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
+		DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
+		DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
+		DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
 	}
 	DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
 	DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
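
The hunk above changes every DEBUGFS_ADD_FILE() call to pass an explicit mode instead of a hard-coded one. The macro definition itself is outside this hunk; a sketch of what the three-argument form is assumed to expand to, simply forwarding the mode to debugfs_create_file():

/* Assumed shape of the updated macro; the real definition lives
 * earlier in iwl-debugfs.c and is not shown in this diff. */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	dbgfs->dbgfs_##parent##_files.file_##name =			\
		debugfs_create_file(#name, mode, dbgfs->dir_##parent,	\
				    priv, &iwl_dbgfs_##name##_ops);	\
	if (!dbgfs->dbgfs_##parent##_files.file_##name)			\
		goto err;						\
} while (0)
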
@@ -1716,9 +1951,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
-#ifdef CONFIG_IWLWIFI_LEDS
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
-#endif
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
 	DEBUGFS_REMOVE(priv->dbgfs->dir_data);
@@ -1728,6 +1961,11 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
+	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
+	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
+			file_clear_ucode_statistics);
+	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
+			file_clear_traffic_statistics);
 	if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
 		DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
 			file_ucode_rx_stats);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 028d50599550..2673e9a4db92 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,7 +43,6 @@
 #include "iwl-debug.h"
 #include "iwl-4965-hw.h"
 #include "iwl-3945-hw.h"
-#include "iwl-3945-led.h"
 #include "iwl-led.h"
 #include "iwl-power.h"
 #include "iwl-agn-rs.h"
@@ -53,21 +52,23 @@ extern struct iwl_cfg iwl4965_agn_cfg;
 extern struct iwl_cfg iwl5300_agn_cfg;
 extern struct iwl_cfg iwl5100_agn_cfg;
 extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bg_cfg;
+extern struct iwl_cfg iwl5100_bgn_cfg;
 extern struct iwl_cfg iwl5100_abg_cfg;
 extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl6000h_2agn_cfg;
+extern struct iwl_cfg iwl5150_abg_cfg;
 extern struct iwl_cfg iwl6000i_2agn_cfg;
+extern struct iwl_cfg iwl6000i_2abg_cfg;
+extern struct iwl_cfg iwl6000i_2bg_cfg;
 extern struct iwl_cfg iwl6000_3agn_cfg;
 extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_3agn_cfg;
+extern struct iwl_cfg iwl6050_2abg_cfg;
 extern struct iwl_cfg iwl1000_bgn_cfg;
+extern struct iwl_cfg iwl1000_bg_cfg;
 
 struct iwl_tx_queue;
 
 /* shared structures from iwl-5000.c */
 extern struct iwl_mod_params iwl50_mod_params;
-extern struct iwl_ops iwl5000_ops;
 extern struct iwl_ucode_ops iwl5000_ucode;
 extern struct iwl_lib_ops iwl5000_lib;
 extern struct iwl_hcmd_ops iwl5000_hcmd;
@@ -81,9 +82,6 @@ extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
 				    __le32 *tx_flags);
 extern int iwl5000_calc_rssi(struct iwl_priv *priv,
 			     struct iwl_rx_phy_res *rx_resp);
-extern int iwl5000_apm_init(struct iwl_priv *priv);
-extern void iwl5000_apm_stop(struct iwl_priv *priv);
-extern int iwl5000_apm_reset(struct iwl_priv *priv);
 extern void iwl5000_nic_config(struct iwl_priv *priv);
 extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
 extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -144,12 +142,13 @@ extern void iwl5000_temperature(struct iwl_priv *priv);
 #define	DEFAULT_LONG_RETRY_LIMIT  4U
 
 struct iwl_rx_mem_buffer {
-	dma_addr_t real_dma_addr;
-	dma_addr_t aligned_dma_addr;
-	struct sk_buff *skb;
+	dma_addr_t page_dma;
+	struct page *page;
 	struct list_head list;
 };
 
+#define rxb_addr(r) page_address(r->page)
+
 /* defined below */
 struct iwl_device_cmd;
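
With the RX buffer now backed by a page rather than an skb, receive handlers reach the received packet through the rxb_addr() macro added above. An illustrative handler, assuming the usual iwl_rx_packet layout; the real handlers are converted in other files of this series:

/* Illustrative only; actual RX handlers are updated elsewhere. */
static void iwl_example_rx_handler(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	/* rxb_addr() maps the RX page to its kernel virtual address,
	 * where the iwl_rx_packet header begins */
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_RX(priv, "RX cmd %#.2x\n", pkt->hdr.cmd);
}
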
 
@@ -165,7 +164,7 @@ struct iwl_cmd_meta {
 	 */
 	void (*callback)(struct iwl_priv *priv,
 			 struct iwl_device_cmd *cmd,
-			 struct sk_buff *skb);
+			 struct iwl_rx_packet *pkt);
 
 	/* The CMD_SIZE_HUGE flag bit indicates that the command
 	 * structure is stored at the end of the shared queue memory. */
@@ -293,9 +292,6 @@ struct iwl_channel_info {
 
 	/* HT40 channel info */
 	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
-	s8 ht40_curr_txpow;	/* (dBm) regulatory/spectrum/user (not h/w) */
-	s8 ht40_min_power;	/* always 0 */
-	s8 ht40_scan_power;	/* (dBm) eeprom, direct scans, any rate */
 	u8 ht40_flags;		/* flags copied from EEPROM */
 	u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
 
@@ -321,6 +317,13 @@ struct iwl_channel_info {
  * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
 #define IWL_MIN_NUM_QUEUES	10
 
+/*
+ * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00;
+ * the driver maps it into the appropriate device FIFO for the
+ * uCode.
+ */
+#define IWL_CMD_QUEUE_NUM	4
+
 /* Power management (not Tx power) structures */
 
 enum iwl_pwr_src {
@@ -356,7 +359,14 @@ enum {
 	CMD_WANT_SKB = (1 << 2),
 };
 
-#define IWL_CMD_MAX_PAYLOAD 320
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/*
+ * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
+ * SNAP header and alignment. It should also be big enough for 802.11
+ * control frames.
+ */
+#define IWL_LINK_HDR_MAX 64
 
 /**
  * struct iwl_device_cmd
@@ -373,7 +383,8 @@ struct iwl_device_cmd {
 		u16 val16;
 		u32 val32;
 		struct iwl_tx_cmd tx;
-		u8 payload[IWL_CMD_MAX_PAYLOAD];
+		struct iwl6000_channel_switch_cmd chswitch;
+		u8 payload[DEF_CMD_PAYLOAD_SIZE];
 	} __attribute__ ((packed)) cmd;
 } __attribute__ ((packed));
 
@@ -382,21 +393,15 @@ struct iwl_device_cmd {
 
 struct iwl_host_cmd {
 	const void *data;
-	struct sk_buff *reply_skb;
+	unsigned long reply_page;
 	void (*callback)(struct iwl_priv *priv,
 			 struct iwl_device_cmd *cmd,
-			 struct sk_buff *skb);
+			 struct iwl_rx_packet *pkt);
 	u32 flags;
 	u16 len;
 	u8 id;
 };
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 #define SUP_RATE_11A_MAX_NUM_CHANNELS  8
 #define SUP_RATE_11B_MAX_NUM_CHANNELS  4
 #define SUP_RATE_11G_MAX_NUM_CHANNELS  12
@@ -502,12 +507,11 @@ union iwl_ht_rate_supp {
 #define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
 #define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
 
-struct iwl_ht_info {
+struct iwl_ht_config {
 	/* self configuration data */
-	u8 is_ht;
-	u8 supported_chan_width;
-	u8 sm_ps;
-	struct ieee80211_mcs_info mcs;
+	bool is_ht;
+	bool is_40mhz;
+	bool single_chain_sufficient;
 	/* BSS related data */
 	u8 extension_chan_offset;
 	u8 ht_protection;
@@ -541,26 +545,27 @@ struct iwl_qos_info {
 	struct iwl_qosparam_cmd def_qos_parm;
 };
 
-#define STA_PS_STATUS_WAKE             0
-#define STA_PS_STATUS_SLEEP            1
-
-
-struct iwl3945_station_entry {
-	struct iwl3945_addsta_cmd sta;
-	struct iwl_tid_data tid[MAX_TID_COUNT];
-	u8 used;
-	u8 ps_status;
-	struct iwl_hw_key keyinfo;
-};
-
 struct iwl_station_entry {
 	struct iwl_addsta_cmd sta;
 	struct iwl_tid_data tid[MAX_TID_COUNT];
 	u8 used;
-	u8 ps_status;
 	struct iwl_hw_key keyinfo;
 };
 
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ */
+struct iwl_station_priv {
+	struct iwl_lq_sta lq_sta;
+	atomic_t pending_frames;
+	bool client;
+	bool asleep;
+};
+
 /* one for each uCode image (inst/data, boot/init/runtime) */
 struct fw_desc {
 	void *v_addr;		/* access by driver */
@@ -622,6 +627,10 @@ struct iwl_sensitivity_ranges {
 	u16 auto_corr_max_cck_mrc;
 	u16 auto_corr_min_cck;
 	u16 auto_corr_min_cck_mrc;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
 };
 
 
@@ -639,7 +648,7 @@ struct iwl_sensitivity_ranges {
  * @valid_tx/rx_ant: usable antennas
  * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
  * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_buf_size: Rx buffer size
+ * @rx_page_order: Rx buffer page order
  * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
  * @max_stations:
  * @bcast_sta_id:
@@ -662,9 +671,8 @@ struct iwl_hw_params {
 	u8  valid_rx_ant;
 	u16 max_rxq_size;
 	u16 max_rxq_log;
-	u32 rx_buf_size;
+	u32 rx_page_order;
 	u32 rx_wrt_ptr_reg;
-	u32 max_pkt_size;
 	u8  max_stations;
 	u8  bcast_sta_id;
 	u8  ht40_channel;
@@ -711,7 +719,11 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i)
 
 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
 {
-	/* This is for scan command, the big buffer at end of command array */
+	/*
+	 * This is for the init calibration result and the scan command,
+	 * which require a buffer > TFD_MAX_PAYLOAD_SIZE:
+	 * the big buffer at the end of the command array.
+	 */
 	if (is_huge)
 		return q->n_window;	/* must be power of 2 */
 
@@ -726,9 +738,6 @@ struct iwl_dma_ptr {
 	size_t size;
 };
 
-#define IWL_CHANNEL_WIDTH_20MHZ   0
-#define IWL_CHANNEL_WIDTH_40MHZ   1
-
 #define IWL_OPERATION_MODE_AUTO     0
 #define IWL_OPERATION_MODE_HT_ONLY  1
 #define IWL_OPERATION_MODE_MIXED    2
@@ -741,7 +750,8 @@ struct iwl_dma_ptr {
 
 /* Sensitivity and chain noise calibration */
 #define INITIALIZATION_VALUE		0xFFFF
-#define CAL_NUM_OF_BEACONS		20
+#define IWL4965_CAL_NUM_BEACONS		20
+#define IWL_CAL_NUM_BEACONS		16
 #define MAXIMUM_ALLOWED_PATHLOSS	15
 
 #define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
@@ -845,6 +855,10 @@ struct iwl_sensitivity_data {
 	s32 nrg_auto_corr_silence_diff;
 	u32 num_in_cck_no_fa;
 	u32 nrg_th_ofdm;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
 };
 
 /* Chain noise (differential Rx gain) calib data */
@@ -894,13 +908,11 @@ enum iwl_access_mode {
 /**
  * enum iwl_pa_type - Power Amplifier type
  * @IWL_PA_SYSTEM:  based on uCode configuration
- * @IWL_PA_HYBRID: use both Internal and external PA
  * @IWL_PA_INTERNAL: use Internal only
  */
 enum iwl_pa_type {
 	IWL_PA_SYSTEM = 0,
-	IWL_PA_HYBRID = 1,
-	IWL_PA_INTERNAL = 2,
+	IWL_PA_INTERNAL = 1,
 };
 
 /* interrupt statistics */
@@ -961,7 +973,16 @@ struct traffic_stats {
 };
 #endif
 
-#define IWL_MAX_NUM_QUEUES	20 /* FIXME: do dynamic allocation */
+/*
+ * iwl_switch_rxon: "channel switch" structure
+ *
+ * @switch_in_progress: channel switch in progress
+ * @channel: new channel
+ */
+struct iwl_switch_rxon {
+	bool switch_in_progress;
+	__le16 channel;
+};
 
 struct iwl_priv {
 
@@ -976,7 +997,7 @@ struct iwl_priv {
 	int frames_count;
 
 	enum ieee80211_band band;
-	int alloc_rxb_skb;
+	int alloc_rxb_page;
 
 	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
 				       struct iwl_rx_mem_buffer *rxb);
@@ -1056,21 +1077,18 @@ struct iwl_priv {
 	const struct iwl_rxon_cmd active_rxon;
 	struct iwl_rxon_cmd staging_rxon;
 
-	struct iwl_rxon_cmd recovery_rxon;
+	struct iwl_switch_rxon switch_rxon;
 
 	/* 1st responses from initialize and runtime uCode images.
 	 * 4965's initialize alive response contains some calibration data. */
 	struct iwl_init_alive_resp card_alive_init;
 	struct iwl_alive_resp card_alive;
 
-#ifdef CONFIG_IWLWIFI_LEDS
 	unsigned long last_blink_time;
 	u8 last_blink_rate;
 	u8 allow_blinking;
 	u64 led_tpt;
-	struct iwl_led led[IWL_LED_TRG_MAX];
-	unsigned int rxtxpackets;
-#endif
+
 	u16 active_rate;
 	u16 active_rate_basic;
 
@@ -1080,11 +1098,10 @@ struct iwl_priv {
 	struct iwl_chain_noise_data chain_noise_data;
 	__le16 sensitivity_tbl[HD_TABLE_SIZE];
 
-	struct iwl_ht_info current_ht_config;
+	struct iwl_ht_config current_ht_config;
 	u8 last_phy_res[100];
 
 	/* Rate scaling data */
-	s8 data_retry_limit;
 	u8 retry_rate;
 
 	wait_queue_head_t wait_command_queue;
@@ -1093,7 +1110,7 @@ struct iwl_priv {
 
 	/* Rx and Tx DMA processing queues */
 	struct iwl_rx_queue rxq;
-	struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
+	struct iwl_tx_queue *txq;
 	unsigned long txq_ctx_active_msk;
 	struct iwl_dma_ptr  kw;	/* keep warm address */
 	struct iwl_dma_ptr  scd_bc_tbls;
@@ -1116,7 +1133,9 @@ struct iwl_priv {
 	struct iwl_tt_mgmt thermal_throttle;
 
 	struct iwl_notif_statistics statistics;
-	unsigned long last_statistics_time;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	struct iwl_notif_statistics accum_statistics;
+#endif
 
 	/* context information */
 	u16 rates_mask;
@@ -1216,6 +1235,7 @@ struct iwl_priv {
 	/* TX Power */
 	s8 tx_power_user_lmt;
 	s8 tx_power_device_lmt;
+	s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
 
 
 #ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
new file mode 100644
index 000000000000..e7d88d1da15d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -0,0 +1,14 @@
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "iwl-devtrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
new file mode 100644
index 000000000000..21361968ab7e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -0,0 +1,197 @@
+#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE
+
+#include <linux/tracepoint.h>
+#include "iwl-dev.h"
+
+#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+#define PRIV_ENTRY	__field(struct iwl_priv *, priv)
+#define PRIV_ASSIGN	__entry->priv = priv
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_io
+
+TRACE_EVENT(iwlwifi_dev_ioread32,
+	TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+	TP_ARGS(priv, offs, val),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite8,
+	TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+	TP_ARGS(priv, offs, val),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__field(u32, offs)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite32,
+	TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+	TP_ARGS(priv, offs, val),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_dev_hcmd,
+	TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
+	TP_ARGS(priv, hcmd, len, flags),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__dynamic_array(u8, hcmd, len)
+		__field(u32, flags)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		memcpy(__get_dynamic_array(hcmd), hcmd, len);
+		__entry->flags = flags;
+	),
+	TP_printk("[%p] hcmd %#.2x (%ssync)",
+		  __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
+		  __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_dev_rx,
+	TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+	TP_ARGS(priv, rxbuf, len),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__dynamic_array(u8, rxbuf, len)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+	),
+	TP_printk("[%p] RX cmd %#.2x",
+		  __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+);
+
+TRACE_EVENT(iwlwifi_dev_tx,
+	TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+		 void *buf0, size_t buf0_len,
+		 void *buf1, size_t buf1_len),
+	TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+
+		__field(size_t, framelen)
+		__dynamic_array(u8, tfd, tfdlen)
+
+		/*
+		 * Do not insert between or below these items,
+		 * we want to keep the frame together (except
+		 * for the possible padding).
+		 */
+		__dynamic_array(u8, buf0, buf0_len)
+		__dynamic_array(u8, buf1, buf1_len)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->framelen = buf0_len + buf1_len;
+		memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+		memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+		memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+	),
+	TP_printk("[%p] TX %.2x (%zu bytes)",
+		  __entry->priv,
+		  ((u8 *)__get_dynamic_array(buf0))[0],
+		  __entry->framelen)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_error,
+	TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
+		 u32 data1, u32 data2, u32 line, u32 blink1,
+		 u32 blink2, u32 ilink1, u32 ilink2),
+	TP_ARGS(priv, desc, time, data1, data2, line,
+		blink1, blink2, ilink1, ilink2),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__field(u32, desc)
+		__field(u32, time)
+		__field(u32, data1)
+		__field(u32, data2)
+		__field(u32, line)
+		__field(u32, blink1)
+		__field(u32, blink2)
+		__field(u32, ilink1)
+		__field(u32, ilink2)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->desc = desc;
+		__entry->time = time;
+		__entry->data1 = data1;
+		__entry->data2 = data2;
+		__entry->line = line;
+		__entry->blink1 = blink1;
+		__entry->blink2 = blink2;
+		__entry->ilink1 = ilink1;
+		__entry->ilink2 = ilink2;
+	),
+	TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+		  "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
+		  __entry->priv, __entry->desc, __entry->time, __entry->data1,
+		  __entry->data2, __entry->line, __entry->blink1,
+		  __entry->blink2, __entry->ilink1, __entry->ilink2)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_event,
+	TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+	TP_ARGS(priv, time, data, ev),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+
+		__field(u32, time)
+		__field(u32, data)
+		__field(u32, ev)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->time = time;
+		__entry->data = data;
+		__entry->ev = ev;
+	),
+	TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+		  __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace
+#include <trace/define_trace.h>
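
The tracepoints defined above are meant to be emitted from the low-level register accessors and the RX/TX paths; those call sites live in other files of this series. A minimal sketch of a write accessor emitting the iowrite32 event (the function name and the use of priv->hw_base are assumptions for illustration):

/* Sketch of a tracepoint call site; not part of this file. */
static inline void iwl_example_write32(struct iwl_priv *priv,
				       u32 ofs, u32 val)
{
	trace_iwlwifi_dev_iowrite32(priv, ofs, val);
	iowrite32(val, priv->hw_base + ofs);
}
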
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index e14c9952a935..3946e5c03f81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -215,12 +215,35 @@ static const struct iwl_txpwr_section enhinfo[] = {
 
 int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
 {
-	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
-	if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
-		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
-		return -ENOENT;
+	u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+	int ret = 0;
+
+	IWL_DEBUG_INFO(priv, "EEPROM signature=0x%08x\n", gp);
+	switch (gp) {
+	case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
+		if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+			IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
+				gp);
+			ret = -ENOENT;
+		}
+		break;
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+		if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+			IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
+			ret = -ENOENT;
+		}
+		break;
+	case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
+	default:
+		IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
+			"EEPROM_GP=0x%08x\n",
+			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+			? "OTP" : "EEPROM", gp);
+		ret = -ENOENT;
+		break;
 	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(iwlcore_eeprom_verify_signature);
 
@@ -283,7 +306,8 @@ int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
 			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 
 		/* See if we got it */
-		ret = iwl_poll_direct_bit(priv, CSR_HW_IF_CONFIG_REG,
+		ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
 				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
 				EEPROM_SEM_TIMEOUT);
 		if (ret >= 0) {
@@ -322,7 +346,8 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
 		     CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
 	/* wait for clock to be ready */
-	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
+	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 				  25000);
 	if (ret < 0)
@@ -333,6 +358,14 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
 		udelay(5);
 		iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
 				    APMG_PS_CTRL_VAL_RESET_REQ);
+
+		/*
+		 * CSR auto clock gate disable bit -
+		 * this is only applicable for HW with OTP shadow RAM
+		 */
+		if (priv->cfg->shadow_ram_support)
+			iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
+				CSR_RESET_LINK_PWR_MGMT_DISABLED);
 	}
 	return ret;
 }
@@ -345,7 +378,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
 
 	_iwl_write32(priv, CSR_EEPROM_REG,
 		     CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-	ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
+	ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+				  CSR_EEPROM_REG_READ_VALID_MSK,
 				  CSR_EEPROM_REG_READ_VALID_MSK,
 				  IWL_EEPROM_ACCESS_TIMEOUT);
 	if (ret < 0) {
@@ -484,6 +518,11 @@ int iwl_eeprom_init(struct iwl_priv *priv)
 	}
 	e = (u16 *)priv->eeprom;
 
+	if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+		/* OTP reads require powered-up chip */
+		priv->cfg->ops->lib->apm_ops.init(priv);
+	}
+
 	ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
 	if (ret < 0) {
 		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
@@ -498,7 +537,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
 		ret = -ENOENT;
 		goto err;
 	}
+
 	if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+
 		ret = iwl_init_otp_access(priv);
 		if (ret) {
 			IWL_ERR(priv, "Failed to initialize OTP access.\n");
@@ -529,6 +570,13 @@ int iwl_eeprom_init(struct iwl_priv *priv)
 			e[cache_addr / 2] = eeprom_data;
 			cache_addr += sizeof(u16);
 		}
+
+		/*
+		 * Now that OTP reads are complete, reset chip to save
+		 *   power until we load uCode during "up".
+		 */
+		priv->cfg->ops->lib->apm_ops.stop(priv);
+
 	} else {
 		/* eeprom is an array of 16bit values */
 		for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -537,7 +585,8 @@ int iwl_eeprom_init(struct iwl_priv *priv)
 			_iwl_write32(priv, CSR_EEPROM_REG,
 				     CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
 
-			ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
+			ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+						  CSR_EEPROM_REG_READ_VALID_MSK,
 						  CSR_EEPROM_REG_READ_VALID_MSK,
 						  IWL_EEPROM_ACCESS_TIMEOUT);
 			if (ret < 0) {
@@ -705,9 +754,6 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
 
 	ch_info->ht40_eeprom = *eeprom_ch;
 	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
-	ch_info->ht40_curr_txpow = eeprom_ch->max_power_avg;
-	ch_info->ht40_min_power = 0;
-	ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
 	ch_info->ht40_flags = eeprom_ch->flags;
 	ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
 
@@ -719,7 +765,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
  *     find the highest tx power from all chains for the channel
  */
 static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
-		struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, int element)
+		struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
+		int element, s8 *max_txpower_in_half_dbm)
 {
 	s8 max_txpower_avg = 0; /* (dBm) */
 
@@ -751,10 +798,14 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
 	    (enhanced_txpower[element].mimo3_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
-	/* max. tx power in EEPROM is in 1/2 dBm format
-	 * convert from 1/2 dBm to dBm
+	/*
+	 * Max. tx power in the EEPROM is in 1/2 dBm format;
+	 * convert from 1/2 dBm to dBm, rounding up, but also
+	 * report the exact 1/2 dBm value so that resolution is
+	 * not lost (losing it would impact performance).
 	 */
-	return max_txpower_avg >> 1;
+	*max_txpower_in_half_dbm = max_txpower_avg;
+	return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
 }
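
A worked example of the round-up conversion above, using an assumed EEPROM reading of 31 (15.5 dBm expressed in 1/2 dBm units):

/*
 * half-dBm value from EEPROM:     31   (i.e. 15.5 dBm)
 * rounded-up dBm returned:        (31 & 0x01) + (31 >> 1) = 1 + 15 = 16
 * *max_txpower_in_half_dbm keeps: 31, so the 1/2 dBm resolution
 *                                 remains available to the caller.
 */
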
 
 /**
@@ -763,7 +814,7 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
  */
 static s8 iwl_update_common_txpower(struct iwl_priv *priv,
 		struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
-		int section, int element)
+		int section, int element, s8 *max_txpower_in_half_dbm)
 {
 	struct iwl_channel_info *ch_info;
 	int ch;
@@ -777,25 +828,25 @@ static s8 iwl_update_common_txpower(struct iwl_priv *priv,
 	if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX)
 		is_ht40 = true;
 	max_txpower_avg =
-		iwl_get_max_txpower_avg(priv, enhanced_txpower, element);
+		iwl_get_max_txpower_avg(priv, enhanced_txpower,
+					element, max_txpower_in_half_dbm);
+
 	ch_info = priv->channel_info;
 
 	for (ch = 0; ch < priv->channel_count; ch++) {
 		/* find matching band and update tx power if needed */
 		if ((ch_info->band == enhinfo[section].band) &&
-		    (ch_info->max_power_avg < max_txpower_avg) && (!is_ht40)) {
+		    (ch_info->max_power_avg < max_txpower_avg) &&
+		    (!is_ht40)) {
 			/* Update regulatory-based run-time data */
 			ch_info->max_power_avg = ch_info->curr_txpow =
-			    max_txpower_avg;
+				max_txpower_avg;
 			ch_info->scan_power = max_txpower_avg;
 		}
 		if ((ch_info->band == enhinfo[section].band) && is_ht40 &&
-		    ch_info->ht40_max_power_avg &&
 		    (ch_info->ht40_max_power_avg < max_txpower_avg)) {
 			/* Update regulatory-based run-time data */
 			ch_info->ht40_max_power_avg = max_txpower_avg;
-			ch_info->ht40_curr_txpow = max_txpower_avg;
-			ch_info->ht40_scan_power = max_txpower_avg;
 		}
 		ch_info++;
 	}
@@ -808,7 +859,7 @@ static s8 iwl_update_common_txpower(struct iwl_priv *priv,
  */
 static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
 		struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
-		int section, int element)
+		int section, int element, s8 *max_txpower_in_half_dbm)
 {
 	struct iwl_channel_info *ch_info;
 	int ch;
@@ -817,7 +868,8 @@ static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
 
 	channel = enhinfo[section].iwl_eeprom_section_channel[element];
 	max_txpower_avg =
-		iwl_get_max_txpower_avg(priv, enhanced_txpower, element);
+		iwl_get_max_txpower_avg(priv, enhanced_txpower,
+					element, max_txpower_in_half_dbm);
 
 	ch_info = priv->channel_info;
 	for (ch = 0; ch < priv->channel_count; ch++) {
@@ -831,12 +883,9 @@ static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
 				ch_info->scan_power = max_txpower_avg;
 			}
 			if ((enhinfo[section].is_ht40) &&
-			    (ch_info->ht40_max_power_avg) &&
 			    (ch_info->ht40_max_power_avg < max_txpower_avg)) {
 				/* Update regulatory-based run-time data */
 				ch_info->ht40_max_power_avg = max_txpower_avg;
-				ch_info->ht40_curr_txpow = max_txpower_avg;
-				ch_info->ht40_scan_power = max_txpower_avg;
 			}
 			break;
 		}
@@ -855,6 +904,7 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
 	struct iwl_eeprom_enhanced_txpwr *enhanced_txpower;
 	u32 offset;
 	s8 max_txpower_avg; /* (dBm) */
+	s8 max_txpower_in_half_dbm; /* (half-dBm) */
 
 	/* Loop through all the sections
 	 * adjust bands and channel's max tx power
@@ -867,20 +917,43 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
 		enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *)
 				iwl_eeprom_query_addr(priv, offset);
 
+		/*
+		 * Check for a valid entry -
+		 * different EEPROM versions might contain a different set
+		 * of enhanced tx power tables, so always verify that an
+		 * entry is valid before processing its information.
+		 */
+		if (!enhanced_txpower->common || enhanced_txpower->reserved)
+			continue;
+
 		for (element = 0; element < eeprom_section_count; element++) {
 			if (enhinfo[section].is_common)
 				max_txpower_avg =
 					iwl_update_common_txpower(priv,
-					enhanced_txpower, section, element);
+						enhanced_txpower, section,
+						element,
+						&max_txpower_in_half_dbm);
 			else
 				max_txpower_avg =
 					iwl_update_channel_txpower(priv,
-					enhanced_txpower, section, element);
+						enhanced_txpower, section,
+						element,
+						&max_txpower_in_half_dbm);
 
 			/* Update the tx_power_user_lmt to the highest power
 			 * supported by any channel */
 			if (max_txpower_avg > priv->tx_power_user_lmt)
 				priv->tx_power_user_lmt = max_txpower_avg;
+
+			/*
+			 * Update the tx_power_lmt_in_half_dbm to
+			 * the highest power supported by any channel
+			 */
+			if (max_txpower_in_half_dbm >
+			    priv->tx_power_lmt_in_half_dbm)
+				priv->tx_power_lmt_in_half_dbm =
+					max_txpower_in_half_dbm;
 		}
 	}
 }
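
The enhanced-txpower loop above now tracks two running maxima across all sections and channels: the averaged limit in whole dBm (tx_power_user_lmt) and a second limit kept in half-dBm units (tx_power_lmt_in_half_dbm), matching the half-dBm chain/MIMO fields documented later in this patch. The following is a minimal, stand-alone sketch of that max-tracking pattern only; the sample values and variable layout are invented for illustration and use no driver types.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: track the highest limit seen across channels,
 * once in dBm and once in half-dBm, mirroring the loop above. */
int main(void)
{
	/* hypothetical per-channel values: averaged dBm and max half-dBm */
	int8_t avg_dbm[]      = { 14, 15, 13 };
	int8_t max_half_dbm[] = { 31, 33, 29 };
	int8_t tx_power_user_lmt = 0;        /* dBm */
	int8_t tx_power_lmt_in_half_dbm = 0; /* half-dBm */
	unsigned int i;

	for (i = 0; i < sizeof(avg_dbm); i++) {
		if (avg_dbm[i] > tx_power_user_lmt)
			tx_power_user_lmt = avg_dbm[i];
		if (max_half_dbm[i] > tx_power_lmt_in_half_dbm)
			tx_power_lmt_in_half_dbm = max_half_dbm[i];
	}

	printf("user limit: %d dBm, %d half-dBm\n",
	       tx_power_user_lmt, tx_power_lmt_in_half_dbm);
	return 0;
}

For the sample data the limits come out as 15 dBm and 33 half-dBm (16.5 dBm), which illustrates why the finer-grained half-dBm value is carried separately.
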
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 80b9e45d9b9c..5cd2b66bbe45 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -63,6 +63,8 @@
 #ifndef __iwl_eeprom_h__
 #define __iwl_eeprom_h__
 
+#include <net/mac80211.h>
+
 struct iwl_priv;
 
 /*
@@ -125,19 +127,21 @@ struct iwl_eeprom_channel {
  *    Enhanced regulatory tx power portion of eeprom image can be broken down
  *    into individual structures; each one is 8 bytes in size and contain the
  *    following information
+ * @common: (desc + channel) not used by driver, should _NOT_ be "zero"
  * @chain_a_max_pwr: chain a max power in 1/2 dBm
  * @chain_b_max_pwr: chain b max power in 1/2 dBm
  * @chain_c_max_pwr: chain c max power in 1/2 dBm
+ * @reserved: not used, should be "zero"
  * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
  * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
  *
  */
 struct iwl_eeprom_enhanced_txpwr {
-	u16 reserved;
+	u16 common;
 	s8 chain_a_max;
 	s8 chain_b_max;
 	s8 chain_c_max;
-	s8 reserved1;
+	s8 reserved;
 	s8 mimo2_max;
 	s8 mimo3_max;
 } __attribute__ ((packed));
@@ -256,6 +260,15 @@ struct iwl_eeprom_enhanced_txpwr {
 #define EEPROM_5050_TX_POWER_VERSION    (4)
 #define EEPROM_5050_EEPROM_VERSION	(0x21E)
 
+/* 1000 Specific */
+#define EEPROM_1000_EEPROM_VERSION	(0x15C)
+
+/* 6x00 Specific */
+#define EEPROM_6000_EEPROM_VERSION	(0x434)
+
+/* 6x50 Specific */
+#define EEPROM_6050_EEPROM_VERSION	(0x532)
+
 /* OTP */
 /* lower blocks contain EEPROM image and calibration data */
 #define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
@@ -370,12 +383,10 @@ struct iwl_eeprom_calib_info {
 #define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1)	/* 9  bytes */
 #define EEPROM_VERSION                      (2*0x44)	/* 2  bytes */
 #define EEPROM_SKU_CAP                      (2*0x45)	/* 1  bytes */
-#define EEPROM_LEDS_MODE                    (2*0x45+1)	/* 1  bytes */
 #define EEPROM_OEM_MODE                     (2*0x46)	/* 2  bytes */
 #define EEPROM_WOWLAN_MODE                  (2*0x47)	/* 2  bytes */
 #define EEPROM_RADIO_CONFIG                 (2*0x48)	/* 2  bytes */
 #define EEPROM_3945_M_VERSION               (2*0x4A)	/* 1  bytes */
-#define EEPROM_ANTENNA_SWITCH_TYPE          (2*0x4A+1)	/* 1  bytes */
 
 /* The following masks are to be applied on EEPROM_RADIO_CONFIG */
 #define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
@@ -387,7 +398,12 @@ struct iwl_eeprom_calib_info {
 
 #define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
 #define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
-#define EEPROM_5000_RF_CFG_TYPE_MAX  0x3
+
+/* Radio Config for 5000 and up */
+#define EEPROM_RF_CONFIG_TYPE_R3x3	0x0
+#define EEPROM_RF_CONFIG_TYPE_R2x2	0x1
+#define EEPROM_RF_CONFIG_TYPE_R1x2	0x2
+#define EEPROM_RF_CONFIG_TYPE_MAX	0x3
 
 /*
  * Per-channel regulatory data.
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a6856daf14cb..a23165948202 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -56,6 +56,8 @@ const char *get_cmd_string(u8 cmd)
 		IWL_CMD(REPLY_LEDS_CMD);
 		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
 		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
+		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
+		IWL_CMD(COEX_EVENT_CMD);
 		IWL_CMD(RADAR_NOTIFICATION);
 		IWL_CMD(REPLY_QUIET_CMD);
 		IWL_CMD(REPLY_CHANNEL_SWITCH);
@@ -93,6 +95,8 @@ const char *get_cmd_string(u8 cmd)
 		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
 		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
 		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
+		IWL_CMD(TEMPERATURE_NOTIFICATION);
+		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
 	default:
 		return "UNKNOWN";
 
@@ -104,17 +108,8 @@ EXPORT_SYMBOL(get_cmd_string);
 
 static void iwl_generic_cmd_callback(struct iwl_priv *priv,
 				     struct iwl_device_cmd *cmd,
-				     struct sk_buff *skb)
+				     struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *pkt = NULL;
-
-	if (!skb) {
-		IWL_ERR(priv, "Error: Response NULL in %s.\n",
-				get_cmd_string(cmd->hdr.cmd));
-		return;
-	}
-
-	pkt = (struct iwl_rx_packet *)skb->data;
 	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
 			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -205,18 +200,18 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	}
 
 	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
-		IWL_DEBUG_INFO(priv, "Command %s aborted: RF KILL Switch\n",
+		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
 			       get_cmd_string(cmd->id));
 		ret = -ECANCELED;
 		goto fail;
 	}
 	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
-		IWL_DEBUG_INFO(priv, "Command %s failed: FW Error\n",
+		IWL_ERR(priv, "Command %s failed: FW Error\n",
 			       get_cmd_string(cmd->id));
 		ret = -EIO;
 		goto fail;
 	}
-	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_skb) {
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
 		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
 			  get_cmd_string(cmd->id));
 		ret = -EIO;
@@ -238,9 +233,9 @@ cancel:
 							~CMD_WANT_SKB;
 	}
 fail:
-	if (cmd->reply_skb) {
-		dev_kfree_skb_any(cmd->reply_skb);
-		cmd->reply_skb = NULL;
+	if (cmd->reply_page) {
+		free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
+		cmd->reply_page = 0;
 	}
 out:
 	clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
@@ -273,7 +268,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
 			   u8 id, u16 len, const void *data,
 			   void (*callback)(struct iwl_priv *priv,
 					    struct iwl_device_cmd *cmd,
-					    struct sk_buff *skb))
+					    struct iwl_rx_packet *pkt))
 {
 	struct iwl_host_cmd cmd = {
 		.id = id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index d30cb0275d19..e552d4c4bdbe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 
 #include "iwl-debug.h"
+#include "iwl-devtrace.h"
 
 /*
  * IO, register, and NIC memory access functions
@@ -61,7 +62,32 @@
  *
  */
 
-#define _iwl_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
+static inline void _iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+{
+	trace_iwlwifi_dev_iowrite8(priv, ofs, val);
+	iowrite8(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static inline void __iwl_write8(const char *f, u32 l, struct iwl_priv *priv,
+				 u32 ofs, u8 val)
+{
+	IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
+	_iwl_write8(priv, ofs, val);
+}
+#define iwl_write8(priv, ofs, val) \
+	__iwl_write8(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write8(priv, ofs, val) _iwl_write8(priv, ofs, val)
+#endif
+
+
+static inline void _iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+{
+	trace_iwlwifi_dev_iowrite32(priv, ofs, val);
+	iowrite32(val, priv->hw_base + ofs);
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUG
 static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
 				 u32 ofs, u32 val)
@@ -75,7 +101,13 @@ static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
 #define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
 #endif
 
-#define _iwl_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
+static inline u32 _iwl_read32(struct iwl_priv *priv, u32 ofs)
+{
+	u32 val = ioread32(priv->hw_base + ofs);
+	trace_iwlwifi_dev_ioread32(priv, ofs, val);
+	return val;
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUG
 static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
 {
@@ -188,6 +220,26 @@ static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
 
 	/* this bit wakes up the NIC */
 	_iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/*
+	 * These bits say the device is running, and should keep running for
+	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+	 * but they do not indicate that embedded SRAM is restored yet;
+	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
+	 * to/from host DRAM when sleeping/waking for power-saving.
+	 * Each direction takes approximately 1/4 millisecond; with this
+	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
+	 * series of register accesses is expected (e.g. reading Event Log),
+	 * to keep device from sleeping.
+	 *
+	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+	 * SRAM is okay/restored.  We don't check that here because this call
+	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
+	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+	 *
+	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
+	 * and do not save/restore SRAM when power cycling.
+	 */
 	ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
 			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
 			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
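
The register accessors added above are layered: a plain inline that performs the MMIO write and fires a tracepoint, an optional wrapper under CONFIG_IWLWIFI_DEBUG that also logs the calling file and line, and a public macro that selects between the two. The sketch below reproduces only that layering in user space, with a stand-in hw_write8() and a DEBUG_IO define in place of the real tracepoints and Kconfig option; the register offset used in main() is made up.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the MMIO write; here it just records the access. */
static void hw_write8(uint32_t ofs, uint8_t val)
{
	printf("trace: iowrite8(0x%08X, 0x%02X)\n", ofs, val);
}

#ifdef DEBUG_IO
static void dbg_write8(const char *f, int l, uint32_t ofs, uint8_t val)
{
	printf("write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
	hw_write8(ofs, val);
}
#define write8(ofs, val) dbg_write8(__FILE__, __LINE__, ofs, val)
#else
#define write8(ofs, val) hw_write8(ofs, val)
#endif

int main(void)
{
	/* 0x40 mirrors the interrupt-coalescing value written later in
	 * this patch; the offset 0x0004 is purely illustrative. */
	write8(0x0004, 0x40);
	return 0;
}

Building with -DDEBUG_IO routes every write8() through the logging wrapper; without it the macro expands straight to the raw write, which mirrors how iwl_write8() behaves.
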
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index f420c99e7240..46c7a95b88f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -42,15 +42,11 @@
 #include "iwl-core.h"
 #include "iwl-io.h"
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-static const char *led_type_str[] = {
-	__stringify(IWL_LED_TRG_TX),
-	__stringify(IWL_LED_TRG_RX),
-	__stringify(IWL_LED_TRG_ASSOC),
-	__stringify(IWL_LED_TRG_RADIO),
-	NULL
-};
-#endif /* CONFIG_IWLWIFI_DEBUG */
+/* default: IWL_LED_BLINK(0) using blinking index table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "LED mode: 0=blinking, "
+			   "1=On(RF On)/Off(RF Off) (default 0)");
 
 
 static const struct {
@@ -65,11 +61,11 @@ static const struct {
 	{70, 65, 65},
 	{50, 75, 75},
 	{20, 85, 85},
-	{15, 95, 95 },
-	{10, 110, 110},
-	{5, 130, 130},
+	{10, 95, 95},
+	{5, 110, 110},
+	{1, 130, 130},
 	{0, 167, 167},
-/* SOLID_ON */
+	/* SOLID_ON */
 	{-1, IWL_LED_SOLID, 0}
 };
 
@@ -78,191 +74,74 @@ static const struct {
 #define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
 #define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
 
-/*  [0-256] -> [0..8] FIXME: we need [0..10] */
-static inline int iwl_brightness_to_idx(enum led_brightness brightness)
-{
-	return fls(0x000000FF & (u32)brightness);
-}
-
-/* Send led command */
-static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+/*
+ * Adjust the LED blink rate to compensate for the MAC clock deviation of
+ * each HW. LED blink rate analysis showed an average deviation of 0% on
+ * 3945, 5% on 4965 HW and 20% on the 5000 series and up.
+ * The LED on/off time must be compensated per HW according to this
+ * deviation to achieve the desired blink frequency.
+ * The calculation is: (100 - averageDeviation) / 100 * blinkTime
+ * For code efficiency the calculation is done as:
+ *     compensation = (100 - averageDeviation) * 64 / 100
+ *     NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
+				    u8 time, u16 compensation)
 {
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_LEDS_CMD,
-		.len = sizeof(struct iwl_led_cmd),
-		.data = led_cmd,
-		.flags = CMD_ASYNC,
-		.callback = NULL,
-	};
-	u32 reg;
-
-	reg = iwl_read32(priv, CSR_LED_REG);
-	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
-		iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+	if (!compensation) {
+		IWL_ERR(priv, "undefined blink compensation: "
+			"using pre-defined blink time\n");
+		return time;
+	}
 
-	return iwl_send_cmd(priv, &cmd);
+	return (u8)((time * compensation) >> 6);
 }
 
 /* Set led pattern command */
-static int iwl_led_pattern(struct iwl_priv *priv, int led_id,
-			       unsigned int idx)
+static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
 {
 	struct iwl_led_cmd led_cmd = {
-		.id = led_id,
+		.id = IWL_LED_LINK,
 		.interval = IWL_DEF_LED_INTRVL
 	};
 
 	BUG_ON(idx > IWL_MAX_BLINK_TBL);
 
-	led_cmd.on = blink_tbl[idx].on_time;
-	led_cmd.off = blink_tbl[idx].off_time;
-
-	return iwl_send_led_cmd(priv, &led_cmd);
-}
-
-/* Set led register off */
-static int iwl_led_on_reg(struct iwl_priv *priv, int led_id)
-{
-	IWL_DEBUG_LED(priv, "led on %d\n", led_id);
-	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-	return 0;
-}
+	IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
+			priv->cfg->led_compensation);
+	led_cmd.on =
+		iwl_blink_compensation(priv, blink_tbl[idx].on_time,
+					priv->cfg->led_compensation);
+	led_cmd.off =
+		iwl_blink_compensation(priv, blink_tbl[idx].off_time,
+					priv->cfg->led_compensation);
 
-#if 0
-/* Set led on command */
-static int iwl_led_on(struct iwl_priv *priv, int led_id)
-{
-	struct iwl_led_cmd led_cmd = {
-		.id = led_id,
-		.on = IWL_LED_SOLID,
-		.off = 0,
-		.interval = IWL_DEF_LED_INTRVL
-	};
-	return iwl_send_led_cmd(priv, &led_cmd);
+	return priv->cfg->ops->led->cmd(priv, &led_cmd);
 }
 
-/* Set led off command */
-int iwl_led_off(struct iwl_priv *priv, int led_id)
+int iwl_led_start(struct iwl_priv *priv)
 {
-	struct iwl_led_cmd led_cmd = {
-		.id = led_id,
-		.on = 0,
-		.off = 0,
-		.interval = IWL_DEF_LED_INTRVL
-	};
-	IWL_DEBUG_LED(priv, "led off %d\n", led_id);
-	return iwl_send_led_cmd(priv, &led_cmd);
+	return priv->cfg->ops->led->on(priv);
 }
-#endif
-
+EXPORT_SYMBOL(iwl_led_start);
 
-/* Set led register off */
-static int iwl_led_off_reg(struct iwl_priv *priv, int led_id)
-{
-	IWL_DEBUG_LED(priv, "LED Reg off\n");
-	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
-	return 0;
-}
-
-/*
- * Set led register in case of disassociation according to rfkill state
- */
-static int iwl_led_associate(struct iwl_priv *priv, int led_id)
+int iwl_led_associate(struct iwl_priv *priv)
 {
 	IWL_DEBUG_LED(priv, "Associated\n");
-	priv->allow_blinking = 1;
-	return iwl_led_on_reg(priv, led_id);
-}
-static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
-{
-	priv->allow_blinking = 0;
-
-	return 0;
-}
-
-/*
- * brightness call back function for Tx/Rx LED
- */
-static int iwl_led_associated(struct iwl_priv *priv, int led_id)
-{
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    !test_bit(STATUS_READY, &priv->status))
-		return 0;
-
+	if (led_mode == IWL_LED_BLINK)
+		priv->allow_blinking = 1;
+	priv->last_blink_time = jiffies;
 
-	/* start counting Tx/Rx bytes */
-	if (!priv->last_blink_time && priv->allow_blinking)
-		priv->last_blink_time = jiffies;
 	return 0;
 }
 
-/*
- * brightness call back for association and radio
- */
-static void iwl_led_brightness_set(struct led_classdev *led_cdev,
-				       enum led_brightness brightness)
+int iwl_led_disassociate(struct iwl_priv *priv)
 {
-	struct iwl_led *led = container_of(led_cdev, struct iwl_led, led_dev);
-	struct iwl_priv *priv = led->priv;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-
-	IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
-			led_type_str[led->type], brightness);
-	switch (brightness) {
-	case LED_FULL:
-		if (led->led_on)
-			led->led_on(priv, IWL_LED_LINK);
-		break;
-	case LED_OFF:
-		if (led->led_off)
-			led->led_off(priv, IWL_LED_LINK);
-		break;
-	default:
-		if (led->led_pattern) {
-			int idx = iwl_brightness_to_idx(brightness);
-			led->led_pattern(priv, IWL_LED_LINK, idx);
-		}
-		break;
-	}
-}
-
-
-
-/*
- * Register led class with the system
- */
-static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
-				   enum led_type type, u8 set_led,
-				   char *trigger)
-{
-	struct device *device = wiphy_dev(priv->hw->wiphy);
-	int ret;
-
-	led->led_dev.name = led->name;
-	led->led_dev.brightness_set = iwl_led_brightness_set;
-	led->led_dev.default_trigger = trigger;
-
-	led->priv = priv;
-	led->type = type;
-
-	ret = led_classdev_register(device, &led->led_dev);
-	if (ret) {
-		IWL_ERR(priv, "Error: failed to register led handler.\n");
-		return ret;
-	}
-
-	led->registered = 1;
-
-	if (set_led && led->led_on)
-		led->led_on(priv, IWL_LED_LINK);
+	priv->allow_blinking = 0;
 
 	return 0;
 }
 
-
 /*
  * calculate blink rate according to last second Tx/Rx activities
  */
@@ -288,7 +167,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
 		i = IWL_MAX_BLINK_TBL;
 	else
 		for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
-			if (tpt  > (blink_tbl[i].tpt * IWL_1MB_RATE))
+			if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
 				break;
 
 	IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
@@ -317,8 +196,7 @@ void iwl_leds_background(struct iwl_priv *priv)
 		priv->last_blink_time = 0;
 		if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
 			priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
-			iwl_led_pattern(priv, IWL_LED_LINK,
-					    IWL_SOLID_BLINK_IDX);
+			iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
 		}
 		return;
 	}
@@ -331,111 +209,17 @@ void iwl_leds_background(struct iwl_priv *priv)
 
 	/* call only if blink rate change */
 	if (blink_idx != priv->last_blink_rate)
-		iwl_led_pattern(priv, IWL_LED_LINK, blink_idx);
+		iwl_led_pattern(priv, blink_idx);
 
 	priv->last_blink_time = jiffies;
 	priv->last_blink_rate = blink_idx;
 }
+EXPORT_SYMBOL(iwl_leds_background);
 
-/* Register all led handler */
-int iwl_leds_register(struct iwl_priv *priv)
+void iwl_leds_init(struct iwl_priv *priv)
 {
-	char *trigger;
-	int ret;
-
 	priv->last_blink_rate = 0;
-	priv->led_tpt = 0;
 	priv->last_blink_time = 0;
 	priv->allow_blinking = 0;
-
-	trigger = ieee80211_get_radio_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_RADIO].name,
-		 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
-		 wiphy_name(priv->hw->wiphy));
-
-	priv->led[IWL_LED_TRG_RADIO].led_on = iwl_led_on_reg;
-	priv->led[IWL_LED_TRG_RADIO].led_off = iwl_led_off_reg;
-	priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
-
-	ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
-				   IWL_LED_TRG_RADIO, 1, trigger);
-	if (ret)
-		goto exit_fail;
-
-	trigger = ieee80211_get_assoc_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
-		 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
-		 wiphy_name(priv->hw->wiphy));
-
-	ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
-				   IWL_LED_TRG_ASSOC, 0, trigger);
-
-	/* for assoc always turn led on */
-	priv->led[IWL_LED_TRG_ASSOC].led_on = iwl_led_associate;
-	priv->led[IWL_LED_TRG_ASSOC].led_off = iwl_led_disassociate;
-	priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
-
-	if (ret)
-		goto exit_fail;
-
-	trigger = ieee80211_get_rx_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_RX].name,
-		 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
-		 wiphy_name(priv->hw->wiphy));
-
-	ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
-				   IWL_LED_TRG_RX, 0, trigger);
-
-	priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
-	priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
-	priv->led[IWL_LED_TRG_RX].led_pattern = iwl_led_pattern;
-
-	if (ret)
-		goto exit_fail;
-
-	trigger = ieee80211_get_tx_led_name(priv->hw);
-	snprintf(priv->led[IWL_LED_TRG_TX].name,
-		 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
-		 wiphy_name(priv->hw->wiphy));
-
-	ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
-				   IWL_LED_TRG_TX, 0, trigger);
-
-	priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
-	priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
-	priv->led[IWL_LED_TRG_TX].led_pattern = iwl_led_pattern;
-
-	if (ret)
-		goto exit_fail;
-
-	return 0;
-
-exit_fail:
-	iwl_leds_unregister(priv);
-	return ret;
 }
-EXPORT_SYMBOL(iwl_leds_register);
-
-/* unregister led class */
-static void iwl_leds_unregister_led(struct iwl_led *led, u8 set_led)
-{
-	if (!led->registered)
-		return;
-
-	led_classdev_unregister(&led->led_dev);
-
-	if (set_led)
-		led->led_dev.brightness_set(&led->led_dev, LED_OFF);
-	led->registered = 0;
-}
-
-/* Unregister all led handlers */
-void iwl_leds_unregister(struct iwl_priv *priv)
-{
-	iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
-	iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
-	iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
-	iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
-}
-EXPORT_SYMBOL(iwl_leds_unregister);
-
+EXPORT_SYMBOL(iwl_leds_init);
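
The blink-compensation comment in iwl-led.c above quotes average MAC clock deviations of 0% (3945), 5% (4965) and 20% (5000 series and up), and the fixed-point formula compensation = (100 - deviation) * 64 / 100, NewBlinkTime = (compensation * BlinkTime) / 64. Below is a small stand-alone check of that arithmetic, using 65 as a sample on-time from the blink table; note the driver itself takes the precomputed compensation value from priv->cfg->led_compensation rather than deriving it at run time.

#include <stdint.h>
#include <stdio.h>

/* Same fixed-point math as iwl_blink_compensation(): time * comp / 64 */
static uint8_t blink_compensation(uint8_t time, uint16_t compensation)
{
	return (uint8_t)((time * compensation) >> 6);
}

int main(void)
{
	/* average MAC clock deviations quoted in the driver comment */
	const int deviation[] = { 0, 5, 20 };	/* 3945, 4965, 5000+ */
	const uint8_t on_time = 65;		/* sample entry from blink_tbl */
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t comp = (100 - deviation[i]) * 64 / 100;

		printf("deviation %2d%%: compensation %u, on-time %u -> %u\n",
		       deviation[i], comp, on_time,
		       blink_compensation(on_time, comp));
	}
	return 0;
}

For the 20% case the compensation factor is 51/64, so a 65-unit on-time is programmed as 51; the 3945 case degenerates to the identity (64/64), matching its 0% deviation.
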
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index ef9b174c37ff..f47f053f02ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -30,9 +30,6 @@
 
 struct iwl_priv;
 
-#ifdef CONFIG_IWLWIFI_LEDS
-#include <linux/leds.h>
-
 #define IWL_LED_SOLID 11
 #define IWL_LED_NAME_LEN 31
 #define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
@@ -47,38 +44,23 @@ enum led_type {
 	IWL_LED_TRG_RADIO,
 	IWL_LED_TRG_MAX,
 };
-#endif
-
-#ifdef CONFIG_IWLWIFI_LEDS
-
-struct iwl_led {
-	struct iwl_priv *priv;
-	struct led_classdev led_dev;
-	char name[32];
 
-	int (*led_on) (struct iwl_priv *priv, int led_id);
-	int (*led_off) (struct iwl_priv *priv, int led_id);
-	int (*led_pattern) (struct iwl_priv *priv, int led_id, unsigned int idx);
-
-	enum led_type type;
-	unsigned int registered;
+/*
+ * LED mode
+ *    IWL_LED_BLINK:    adjust led blink rate based on blink table
+ *    IWL_LED_RF_STATE: turn LED on/off based on RF state
+ *			LED ON  = RF ON
+ *			LED OFF = RF OFF
+ */
+enum iwl_led_mode {
+	IWL_LED_BLINK,
+	IWL_LED_RF_STATE,
 };
 
-int iwl_leds_register(struct iwl_priv *priv);
-void iwl_leds_unregister(struct iwl_priv *priv);
+void iwl_leds_init(struct iwl_priv *priv);
 void iwl_leds_background(struct iwl_priv *priv);
+int iwl_led_start(struct iwl_priv *priv);
+int iwl_led_associate(struct iwl_priv *priv);
+int iwl_led_disassociate(struct iwl_priv *priv);
 
-#else
-static inline int iwl_leds_register(struct iwl_priv *priv)
-{
-	return 0;
-}
-static inline void iwl_leds_unregister(struct iwl_priv *priv)
-{
-}
-static inline void iwl_leds_background(struct iwl_priv *priv)
-{
-}
-
-#endif /* CONFIG_IWLWIFI_LEDS */
 #endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 60be976afff8..8ccc0bb1d9ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(no_sleep_autoadjust,
 
 struct iwl_power_vec_entry {
 	struct iwl_powertable_cmd cmd;
-	u8 no_dtim;
+	u8 no_dtim;	/* number of DTIMs to skip */
 };
 
 #define IWL_DTIM_RANGE_0_MAX	2
@@ -83,8 +83,9 @@ struct iwl_power_vec_entry {
 				     cpu_to_le32(X4)}
 /* default power management (not Tx power) table values */
 /* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
+/* DTIM 0 - 2 */
 static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
-	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
@@ -93,15 +94,17 @@ static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
 
 
 /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
+/* DTIM 3 - 10 */
 static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
 	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
 	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
-	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
+	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
 };
 
 /* for DTIM period > IWL_DTIM_RANGE_1_MAX */
+/* DTIM 11 - */
 static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
 	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -115,13 +118,15 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
 				 enum iwl_power_level lvl, int period)
 {
 	const struct iwl_power_vec_entry *table;
-	int max_sleep, i;
-	bool skip;
+	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
+	int i;
+	u8 skip;
+	u32 slp_itrvl;
 
 	table = range_2;
-	if (period < IWL_DTIM_RANGE_1_MAX)
+	if (period <= IWL_DTIM_RANGE_1_MAX)
 		table = range_1;
-	if (period < IWL_DTIM_RANGE_0_MAX)
+	if (period <= IWL_DTIM_RANGE_0_MAX)
 		table = range_0;
 
 	BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
@@ -129,34 +134,60 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
 	*cmd = table[lvl].cmd;
 
 	if (period == 0) {
-		skip = false;
+		skip = 0;
 		period = 1;
+		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+			max_sleep[i] =  1;
+
 	} else {
-		skip = !!table[lvl].no_dtim;
+		skip = table[lvl].no_dtim;
+		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
+		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
 	}
 
-	if (skip) {
-		__le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
-		max_sleep = le32_to_cpu(slp_itrvl);
-		if (max_sleep == 0xFF)
-			max_sleep = period * (skip + 1);
-		else if (max_sleep > period)
-			max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	/* figure out the listen interval based on dtim period and skip */
+	if (slp_itrvl == 0xFF)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32(period * (skip + 1));
+
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	if (slp_itrvl > period)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32((slp_itrvl / period) * period);
+
+	if (skip)
 		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
-	} else {
-		max_sleep = period;
+	else
 		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
-	}
 
-	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
-		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
-			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
+
+	/* enforce max sleep interval */
+	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
+		if (le32_to_cpu(cmd->sleep_interval[i]) >
+		    (max_sleep[i] * period))
+			cmd->sleep_interval[i] =
+				cpu_to_le32(max_sleep[i] * period);
+		if (i != (IWL_POWER_VEC_SIZE - 1)) {
+			if (le32_to_cpu(cmd->sleep_interval[i]) >
+			    le32_to_cpu(cmd->sleep_interval[i+1]))
+				cmd->sleep_interval[i] =
+					cmd->sleep_interval[i+1];
+		}
+	}
 
 	if (priv->power_data.pci_pm)
 		cmd->flags |= IWL_POWER_PCI_PM_MSK;
 	else
 		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
 
+	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
+			skip, period);
 	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
 }
 
@@ -165,26 +196,26 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
  *=============================================================================
  *                 Condition Nxt State  Condition Nxt State Condition Nxt State
  *-----------------------------------------------------------------------------
- *     IWL_TI_0     T >= 115   CT_KILL  115>T>=105   TI_1      N/A      N/A
- *     IWL_TI_1     T >= 115   CT_KILL  115>T>=110   TI_2     T<=95     TI_0
- *     IWL_TI_2     T >= 115   CT_KILL                        T<=100    TI_1
+ *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
+ *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
+ *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
  *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
  *=============================================================================
  */
 static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
 	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
-	{IWL_TI_1, 105, CT_KILL_THRESHOLD},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
+	{IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
 };
 static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
 	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
-	{IWL_TI_2, 110, CT_KILL_THRESHOLD},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
+	{IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
 };
 static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
 	{IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
 };
 static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
 	{IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
@@ -294,6 +325,9 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 
 	if (priv->cfg->broken_powersave)
 		iwl_power_sleep_cam_cmd(priv, &cmd);
+	else if (priv->cfg->supports_idle &&
+		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+		iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
 	else if (tt->state >= IWL_TI_1)
 		iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
 	else if (!enabled)
@@ -348,6 +382,23 @@ bool iwl_ht_enabled(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_ht_enabled);
 
+bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
+{
+	s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+	bool within_margin = false;
+
+	if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+		temp = KELVIN_TO_CELSIUS(priv->temperature);
+
+	if (!priv->thermal_throttle.advanced_tt)
+		within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
+				CT_KILL_THRESHOLD_LEGACY) ? true : false;
+	else
+		within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
+				CT_KILL_THRESHOLD) ? true : false;
+	return within_margin;
+}
+
 enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
 {
 	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
@@ -372,6 +423,7 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
 }
 
 #define CT_KILL_EXIT_DURATION (5)	/* 5 seconds duration */
+#define CT_KILL_WAITING_DURATION (300)	/* 300ms duration */
 
 /*
  * toggle the bit to wake up uCode and check the temperature
@@ -409,6 +461,7 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
 		/* Reschedule the ct_kill timer to occur in
 		 * CT_KILL_EXIT_DURATION seconds to ensure we get a
 		 * thermal update */
+		IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
 		mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
 			  CT_KILL_EXIT_DURATION * HZ);
 	}
@@ -432,6 +485,33 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
 	}
 }
 
+static void iwl_tt_ready_for_ct_kill(unsigned long data)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)data;
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	/* temperature timer expired, ready to go into CT_KILL state */
+	if (tt->state != IWL_TI_CT_KILL) {
+		IWL_DEBUG_POWER(priv,
+			"entering CT_KILL state when temperature timer expired\n");
+		tt->state = IWL_TI_CT_KILL;
+		set_bit(STATUS_CT_KILL, &priv->status);
+		iwl_perform_ct_kill_task(priv, true);
+	}
+}
+
+static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
+{
+	IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
+	/* make request to retrieve statistics information */
+	iwl_send_statistics_request(priv, CMD_SYNC, false);
+	/* Reschedule the ct_kill wait timer */
+	mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
+		 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
+}
+
 #define IWL_MINIMAL_POWER_THRESHOLD		(CT_KILL_THRESHOLD_LEGACY)
 #define IWL_REDUCED_PERFORMANCE_THRESHOLD_2	(100)
 #define IWL_REDUCED_PERFORMANCE_THRESHOLD_1	(90)
@@ -445,7 +525,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
  *	Throttle early enough to lower the power consumption before
  *	drastic steps are needed
  */
-static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
+static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
 {
 	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
 	enum iwl_tt_state old_state;
@@ -474,6 +554,8 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
 #ifdef CONFIG_IWLWIFI_DEBUG
 	tt->tt_previous_temp = temp;
 #endif
+	/* stop ct_kill_waiting_tm timer */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
 	if (tt->state != old_state) {
 		switch (tt->state) {
 		case IWL_TI_0:
@@ -494,17 +576,28 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
 			break;
 		}
 		mutex_lock(&priv->mutex);
-		if (iwl_power_update_mode(priv, true)) {
+		if (old_state == IWL_TI_CT_KILL)
+			clear_bit(STATUS_CT_KILL, &priv->status);
+		if (tt->state != IWL_TI_CT_KILL &&
+		    iwl_power_update_mode(priv, true)) {
 			/* TT state not updated
 			 * try again during next temperature read
 			 */
+			if (old_state == IWL_TI_CT_KILL)
+				set_bit(STATUS_CT_KILL, &priv->status);
 			tt->state = old_state;
 			IWL_ERR(priv, "Cannot update power mode, "
 					"TT state not updated\n");
 		} else {
-			if (tt->state == IWL_TI_CT_KILL)
-				iwl_perform_ct_kill_task(priv, true);
-			else if (old_state == IWL_TI_CT_KILL &&
+			if (tt->state == IWL_TI_CT_KILL) {
+				if (force) {
+					set_bit(STATUS_CT_KILL, &priv->status);
+					iwl_perform_ct_kill_task(priv, true);
+				} else {
+					iwl_prepare_ct_kill_task(priv);
+					tt->state = old_state;
+				}
+			} else if (old_state == IWL_TI_CT_KILL &&
 				 tt->state != IWL_TI_CT_KILL)
 				iwl_perform_ct_kill_task(priv, false);
 			IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
@@ -531,13 +624,13 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
  *=============================================================================
  *                 Condition Nxt State  Condition Nxt State Condition Nxt State
  *-----------------------------------------------------------------------------
- *     IWL_TI_0     T >= 115   CT_KILL  115>T>=105   TI_1      N/A      N/A
- *     IWL_TI_1     T >= 115   CT_KILL  115>T>=110   TI_2     T<=95     TI_0
- *     IWL_TI_2     T >= 115   CT_KILL                        T<=100    TI_1
+ *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
+ *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
+ *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
  *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
  *=============================================================================
  */
-static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
+static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
 {
 	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
 	int i;
@@ -582,6 +675,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
 			break;
 		}
 	}
+	/* stop ct_kill_waiting_tm timer */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
 	if (changed) {
 		struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 
@@ -613,12 +708,17 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
 			iwl_set_rxon_ht(priv, &priv->current_ht_config);
 		}
 		mutex_lock(&priv->mutex);
-		if (iwl_power_update_mode(priv, true)) {
+		if (old_state == IWL_TI_CT_KILL)
+			clear_bit(STATUS_CT_KILL, &priv->status);
+		if (tt->state != IWL_TI_CT_KILL &&
+		    iwl_power_update_mode(priv, true)) {
 			/* TT state not updated
 			 * try again during next temperature read
 			 */
 			IWL_ERR(priv, "Cannot update power mode, "
 					"TT state not updated\n");
+			if (old_state == IWL_TI_CT_KILL)
+				set_bit(STATUS_CT_KILL, &priv->status);
 			tt->state = old_state;
 		} else {
 			IWL_DEBUG_POWER(priv,
@@ -626,9 +726,15 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
 					tt->state);
 			if (old_state != IWL_TI_CT_KILL &&
 			    tt->state == IWL_TI_CT_KILL) {
-				IWL_DEBUG_POWER(priv, "Enter IWL_TI_CT_KILL\n");
-				iwl_perform_ct_kill_task(priv, true);
-
+				if (force) {
+					IWL_DEBUG_POWER(priv,
+						"Enter IWL_TI_CT_KILL\n");
+					set_bit(STATUS_CT_KILL, &priv->status);
+					iwl_perform_ct_kill_task(priv, true);
+				} else {
+					iwl_prepare_ct_kill_task(priv);
+					tt->state = old_state;
+				}
 			} else if (old_state == IWL_TI_CT_KILL &&
 				  tt->state != IWL_TI_CT_KILL) {
 				IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
@@ -665,10 +771,11 @@ static void iwl_bg_ct_enter(struct work_struct *work)
 			      "- ucode going to sleep!\n");
 		if (!priv->thermal_throttle.advanced_tt)
 			iwl_legacy_tt_handler(priv,
-					      IWL_MINIMAL_POWER_THRESHOLD);
+					      IWL_MINIMAL_POWER_THRESHOLD,
+					      true);
 		else
 			iwl_advance_tt_handler(priv,
-					       CT_KILL_THRESHOLD + 1);
+					       CT_KILL_THRESHOLD + 1, true);
 	}
 }
 
@@ -695,11 +802,18 @@ static void iwl_bg_ct_exit(struct work_struct *work)
 		IWL_ERR(priv,
 			"Device temperature below critical"
 			"- ucode awake!\n");
+		/*
+		 * exit from CT_KILL state
+		 * reset the current temperature reading
+		 */
+		priv->temperature = 0;
 		if (!priv->thermal_throttle.advanced_tt)
 			iwl_legacy_tt_handler(priv,
-					IWL_REDUCED_PERFORMANCE_THRESHOLD_2);
+					      IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
+					      true);
 		else
-			iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD);
+			iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
+					       true);
 	}
 }
 
@@ -735,9 +849,9 @@ static void iwl_bg_tt_work(struct work_struct *work)
 		temp = KELVIN_TO_CELSIUS(priv->temperature);
 
 	if (!priv->thermal_throttle.advanced_tt)
-		iwl_legacy_tt_handler(priv, temp);
+		iwl_legacy_tt_handler(priv, temp, false);
 	else
-		iwl_advance_tt_handler(priv, temp);
+		iwl_advance_tt_handler(priv, temp, false);
 }
 
 void iwl_tt_handler(struct iwl_priv *priv)
@@ -768,16 +882,18 @@ void iwl_tt_initialize(struct iwl_priv *priv)
 	tt->state = IWL_TI_0;
 	init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
 	priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
-	priv->thermal_throttle.ct_kill_exit_tm.function = iwl_tt_check_exit_ct_kill;
-
+	priv->thermal_throttle.ct_kill_exit_tm.function =
+		iwl_tt_check_exit_ct_kill;
+	init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
+	priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
+	priv->thermal_throttle.ct_kill_waiting_tm.function =
+		iwl_tt_ready_for_ct_kill;
 	/* setup deferred ct kill work */
 	INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
 	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
 	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
 
-	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
-	case CSR_HW_REV_TYPE_6x00:
-	case CSR_HW_REV_TYPE_6x50:
+	if (priv->cfg->adv_thermal_throttle) {
 		IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
 		tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
 					 IWL_TI_STATE_MAX, GFP_KERNEL);
@@ -810,11 +926,9 @@ void iwl_tt_initialize(struct iwl_priv *priv)
 				&restriction_range[0], size);
 			priv->thermal_throttle.advanced_tt = true;
 		}
-		break;
-	default:
+	} else {
 		IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
 		priv->thermal_throttle.advanced_tt = false;
-		break;
 	}
 }
 EXPORT_SYMBOL(iwl_tt_initialize);
@@ -826,6 +940,8 @@ void iwl_tt_exit(struct iwl_priv *priv)
 
 	/* stop ct_kill_exit_tm timer if activated */
 	del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
+	/* stop ct_kill_waiting_tm timer if activated */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
 	cancel_work_sync(&priv->tt_work);
 	cancel_work_sync(&priv->ct_enter);
 	cancel_work_sync(&priv->ct_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df6f6a49712b..310c32e8f698 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -33,6 +33,7 @@
 #define IWL_ABSOLUTE_ZERO		0
 #define IWL_ABSOLUTE_MAX		0xFFFFFFFF
 #define IWL_TT_INCREASE_MARGIN	5
+#define IWL_TT_CT_KILL_MARGIN	3
 
 enum iwl_antenna_ok {
 	IWL_ANT_OK_NONE,
@@ -110,6 +111,7 @@ struct iwl_tt_mgmt {
 	struct iwl_tt_restriction *restriction;
 	struct iwl_tt_trans *transaction;
 	struct timer_list ct_kill_exit_tm;
+	struct timer_list ct_kill_waiting_tm;
 };
 
 enum iwl_power_level {
@@ -129,6 +131,7 @@ struct iwl_power_mgr {
 
 int iwl_power_update_mode(struct iwl_priv *priv, bool force);
 bool iwl_ht_enabled(struct iwl_priv *priv);
+bool iwl_within_ct_kill_margin(struct iwl_priv *priv);
 enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
 enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
 void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
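
The new iwl_within_ct_kill_margin() compares the current temperature plus IWL_TT_CT_KILL_MARGIN (3 degrees, defined just above) against the CT-kill threshold. Taking the advanced-TT threshold of 114 C from the state-table comments earlier in this patch, the check starts returning true at 111 C. Below is a minimal stand-alone version of that comparison for the advanced case only; the legacy threshold (CT_KILL_THRESHOLD_LEGACY) is defined elsewhere in the driver and is not reproduced here.

#include <stdbool.h>
#include <stdio.h>

#define TT_CT_KILL_MARGIN	3	/* from iwl-power.h above */
#define CT_KILL_THRESHOLD	114	/* advanced TT, per the state tables */

/* simplified: advanced thermal throttling only */
static bool within_ct_kill_margin(int temp_celsius)
{
	return (temp_celsius + TT_CT_KILL_MARGIN) >= CT_KILL_THRESHOLD;
}

int main(void)
{
	int temp;

	for (temp = 109; temp <= 112; temp++)
		printf("%d C: %s\n", temp,
		       within_ct_kill_margin(temp) ? "within margin" : "ok");
	return 0;
}

This gives the driver a 3-degree window in which it can prepare for CT_KILL before the hard threshold is actually crossed.
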
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d393e8f02102..6d95832db06d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,8 @@
  * device.  A queue maps to only one (selectable by driver) Tx DMA channel,
  * but one DMA channel may take input from several queues.
  *
- * Tx DMA channels have dedicated purposes.  For 4965, they are used as follows:
+ * Tx DMA channels have dedicated purposes.  For 4965, they are used as follows
+ * (cf. default_queue_to_tx_fifo in iwl-4965.c):
  *
  * 0 -- EDCA BK (background) frames, lowest priority
  * 1 -- EDCA BE (best effort) frames, normal priority
@@ -265,9 +266,21 @@
  * 6 -- HCCA long frames
  * 7 -- not used by driver (device-internal only)
  *
+ * For 5000 series and up, they are used slightly differently
+ * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- (TBD)
+ * 5 -- HCCA short frames
+ * 6 -- HCCA long frames
+ * 7 -- Commands
+ *
  * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
- * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
- * support 11n aggregation via EDCA DMA channels.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
  *
  * The driver sets up each queue to work in one of two modes:
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec..6090bc15a6d5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -140,6 +140,8 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
 		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+				      reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			goto exit_unlock;
@@ -200,7 +202,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
+		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -239,8 +241,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +254,35 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
-		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-						priority);
+			gfp_mask |= __GFP_NOWARN;
+
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
 
-		if (!skb) {
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+					       "order: %d\n",
+					       priv->hw_params.rx_page_order);
+
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
 					 priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
 					 rxq->free_count);
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
-			break;
+			return;
 		}
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -283,24 +291,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(
-					priv->pci_dev,
-					rxb->skb->data,
-					priv->hw_params.rx_buf_size + 256,
-					PCI_DMA_FROMDEVICE);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
-		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
-		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
-		priv->alloc_rxb_skb++;
+		priv->alloc_rxb_page++;
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
@@ -336,12 +341,14 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -405,14 +412,14 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -470,7 +477,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
 			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
 
-	iwl_write32(priv, CSR_INT_COALESCING, 0x40);
+	/* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */
+	iwl_write8(priv, CSR_INT_COALESCING, 0x40);
 
 	return 0;
 }
@@ -491,7 +499,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_missed_beacon_notif *missed_beacon;
 
 	missed_beacon = &pkt->u.missed_beacon;
@@ -548,13 +556,51 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
 			priv->last_rx_noise);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ *  Assumes that all statistics counters are DWORD-sized.
+ *  FIXME: this function is for debugging only and does not handle
+ *  counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+					__le32 *stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+
+	prev_stats = (__le32 *)&priv->statistics;
+	accum_stats = (u32 *)&priv->accum_statistics;
+
+	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+	     i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
+			*accum_stats += (le32_to_cpu(*stats) -
+				le32_to_cpu(*prev_stats));
+
+	/* reset accumulative statistics for "no-counter" type statistics */
+	priv->accum_statistics.general.temperature =
+		priv->statistics.general.temperature;
+	priv->accum_statistics.general.temperature_m =
+		priv->statistics.general.temperature_m;
+	priv->accum_statistics.general.ttl_timestamp =
+		priv->statistics.general.ttl_timestamp;
+	priv->accum_statistics.tx.tx_power.ant_a =
+		priv->statistics.tx.tx_power.ant_a;
+	priv->accum_statistics.tx.tx_power.ant_b =
+		priv->statistics.tx.tx_power.ant_b;
+	priv->accum_statistics.tx.tx_power.ant_c =
+		priv->statistics.tx.tx_power.ant_c;
+}
+#endif
+
 #define REG_RECALIB_PERIOD (60)
 
 void iwl_rx_statistics(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb)
 {
 	int change;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(priv->statistics),
@@ -566,6 +612,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
 		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
 		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
 	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
 
 	set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,14 +631,29 @@ void iwl_rx_statistics(struct iwl_priv *priv,
 		iwl_rx_calc_noise(priv);
 		queue_work(priv->workqueue, &priv->run_time_calib_work);
 	}
-
-	iwl_leds_background(priv);
-
 	if (priv->cfg->ops->lib->temp_ops.temperature && change)
 		priv->cfg->ops->lib->temp_ops.temperature(priv);
 }
 EXPORT_SYMBOL(iwl_rx_statistics);
 
+void iwl_reply_statistics(struct iwl_priv *priv,
+			      struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+		memset(&priv->statistics, 0,
+			sizeof(struct iwl_notif_statistics));
+#ifdef CONFIG_IWLWIFI_DEBUG
+		memset(&priv->accum_statistics, 0,
+			sizeof(struct iwl_notif_statistics));
+#endif
+		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+	}
+	iwl_rx_statistics(priv, rxb);
+}
+EXPORT_SYMBOL(iwl_reply_statistics);
+
 #define PERFECT_RSSI (-20) /* dBm */
 #define WORST_RSSI (-95)   /* dBm */
 #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
@@ -878,6 +942,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
 					struct iwl_rx_mem_buffer *rxb,
 					struct ieee80211_rx_status *stats)
 {
+	struct sk_buff *skb;
+	int ret = 0;
+	__le16 fc = hdr->frame_control;
+
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
 		IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +958,44 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
 	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	/* Resize SKB from mac header to end of packet */
-	skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data);
-	skb_put(rxb->skb, len);
+	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
+
+	skb_reserve(skb, IWL_LINK_HDR_MAX);
+	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+	/* mac80211 currently doesn't support paged SKBs. Linearize the
+	 * SKB for management frames and for data frames that require
+	 * software decryption or software defragmentation. */
+	if (ieee80211_is_mgmt(fc) ||
+	    ieee80211_has_protected(fc) ||
+	    ieee80211_has_morefrags(fc) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			 0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	/*
+	 * XXX: We cannot touch the page and its virtual memory (hdr) after
+	 * here. It might have already been freed by the above skb change.
+	 */
 
-	iwl_update_stats(priv, false, hdr->frame_control, len);
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	priv->alloc_rxb_skb--;
-	rxb->skb = NULL;
+	iwl_update_stats(priv, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(priv->hw, skb);
+ out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }
 
 /* This is necessary only for a number of statistics, see the caller. */
@@ -926,13 +1023,12 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
 	struct iwl4965_rx_mpdu_res_start *amsdu;
 	u32 len;
 	u32 ampdu_status;
-	u16 fc;
 	u32 rate_n_flags;
 
 	/**
@@ -1065,20 +1161,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
 		priv->last_tsf = le64_to_cpu(phy_res->timestamp);
 	}
 
-	fc = le16_to_cpu(header->frame_control);
-	switch (fc & IEEE80211_FCTL_FTYPE) {
-	case IEEE80211_FTYPE_MGMT:
-	case IEEE80211_FTYPE_DATA:
-		if (priv->iw_mode == NL80211_IFTYPE_AP)
-			iwl_update_ps_mode(priv, fc  & IEEE80211_FCTL_PM,
-						header->addr2);
-		/* fall through */
-	default:
-		iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
-				rxb, &rx_status);
-		break;
-
-	}
+	iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+				    rxb, &rx_status);
 }
 EXPORT_SYMBOL(iwl_rx_reply_rx);
 
@@ -1087,7 +1171,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
 void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	priv->last_phy_res[0] = 1;
 	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
 	       sizeof(struct iwl_rx_phy_res));
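
The paged-RX conversion above drops the old software 256-byte alignment fixup and instead asserts two properties of the page DMA address: it fits in 36 bits and its low eight bits are zero (256-byte aligned). Both checks use DMA_BIT_MASK(); the sketch below restates that macro the way the kernel headers define it (an assumption, not part of this patch) and shows what each mask tests against a made-up address.

#include <stdint.h>
#include <stdio.h>

/* mirrors the definition in <linux/dma-mapping.h> */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* hypothetical page DMA address, 256-byte aligned, below 2^36 */
	uint64_t page_dma = 0x0000000123456700ULL;

	printf("DMA_BIT_MASK(8)  = 0x%llx\n",
	       (unsigned long long)DMA_BIT_MASK(8));
	printf("DMA_BIT_MASK(36) = 0x%llx\n",
	       (unsigned long long)DMA_BIT_MASK(36));

	/* dma address must be no more than 36 bits */
	printf("fits in 36 bits: %s\n",
	       (page_dma & ~DMA_BIT_MASK(36)) ? "no" : "yes");
	/* and also 256 byte aligned: low 8 bits must be zero */
	printf("256-byte aligned: %s\n",
	       (page_dma & DMA_BIT_MASK(8)) ? "no" : "yes");
	return 0;
}

A non-zero value of (dma & ~DMA_BIT_MASK(36)) means the address needs more than 36 bits; a non-zero (dma & DMA_BIT_MASK(8)) means the buffer is not aligned to 256 bytes, which is exactly what the two BUG_ON checks reject.
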
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 4f3a108fa990..a2b2b8315ff9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -27,7 +27,6 @@
  *****************************************************************************/
 #include <linux/types.h>
 #include <linux/etherdevice.h>
-#include <net/lib80211.h>
 #include <net/mac80211.h>
 
 #include "iwl-eeprom.h"
@@ -112,7 +111,7 @@ EXPORT_SYMBOL(iwl_scan_cancel_timeout);
 static int iwl_send_scan_abort(struct iwl_priv *priv)
 {
 	int ret = 0;
-	struct iwl_rx_packet *res;
+	struct iwl_rx_packet *pkt;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SCAN_ABORT_CMD,
 		.flags = CMD_WANT_SKB,
@@ -132,21 +131,21 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
 		return ret;
 	}
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->u.status != CAN_ABORT_STATUS) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->u.status != CAN_ABORT_STATUS) {
 		/* The scan abort will return 1 for success or
 		 * 2 for "failure".  A failure condition can be
 		 * due to simply not being in an active scan which
 		 * can occur if we send the scan abort before
 		 * the microcode has notified us that a scan is
 		 * completed. */
-		IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", res->u.status);
+		IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
 		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
 		clear_bit(STATUS_SCAN_HW, &priv->status);
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return ret;
 }
@@ -156,7 +155,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanreq_notification *notif =
 	    (struct iwl_scanreq_notification *)pkt->u.raw;
 
@@ -168,7 +167,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
 static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanstart_notification *notif =
 	    (struct iwl_scanstart_notification *)pkt->u.raw;
 	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -187,7 +186,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
 				      struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanresults_notification *notif =
 	    (struct iwl_scanresults_notification *)pkt->u.raw;
 
@@ -214,7 +213,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
 				       struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
 
 	IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
@@ -402,6 +401,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
 	if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
 		priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
 }
+EXPORT_SYMBOL(iwl_init_scan_params);
 
 static int iwl_scan_initiate(struct iwl_priv *priv)
 {
@@ -581,6 +581,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
 	u8 rate;
 	bool is_active = false;
 	int  chan_mod;
+	u8 active_chains;
 
 	conf = ieee80211_get_hw_conf(priv->hw);
 
@@ -734,9 +735,22 @@ static void iwl_bg_request_scan(struct work_struct *data)
 	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
 	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
 
+	/* In power save mode use one chain, otherwise use all chains */
+	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+		/* rx_ant has been set to all valid chains previously */
+		active_chains = rx_ant &
+				((u8)(priv->chain_noise_data.active_chains));
+		if (!active_chains)
+			active_chains = rx_ant;
+
+		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+				priv->chain_noise_data.active_chains);
+
+		rx_ant = first_antenna(active_chains);
+	}
 	/* MIMO is not used here, but value is required */
-	rx_chain |= ANT_ABC << RXON_RX_CHAIN_VALID_POS;
-	rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
 	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
 	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
 	scan->rx_chain = cpu_to_le16(rx_chain);
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
index 022bcf115731..1ea5cd345fe8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -177,7 +177,7 @@ static int iwl_get_measurement(struct iwl_priv *priv,
 static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
 					  struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
 
 	if (!report->state) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c6633fec8216..cd6a6901216e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -99,32 +99,25 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
 
 static void iwl_add_sta_callback(struct iwl_priv *priv,
 				 struct iwl_device_cmd *cmd,
-				 struct sk_buff *skb)
+				 struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *res = NULL;
 	struct iwl_addsta_cmd *addsta =
 		(struct iwl_addsta_cmd *)cmd->cmd.payload;
 	u8 sta_id = addsta->sta.sta_id;
 
-	if (!skb) {
-		IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
-		return;
-	}
-
-	res = (struct iwl_rx_packet *)skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			  res->hdr.flags);
+			  pkt->hdr.flags);
 		return;
 	}
 
-	switch (res->u.add_sta.status) {
+	switch (pkt->u.add_sta.status) {
 	case ADD_STA_SUCCESS_MSK:
 		iwl_sta_ucode_activate(priv, sta_id);
 		 /* fall through */
 	default:
 		IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
-			     res->u.add_sta.status);
+			     pkt->u.add_sta.status);
 		break;
 	}
 }
@@ -132,7 +125,7 @@ static void iwl_add_sta_callback(struct iwl_priv *priv,
 int iwl_send_add_sta(struct iwl_priv *priv,
 		     struct iwl_addsta_cmd *sta, u8 flags)
 {
-	struct iwl_rx_packet *res = NULL;
+	struct iwl_rx_packet *pkt = NULL;
 	int ret = 0;
 	u8 data[sizeof(*sta)];
 	struct iwl_host_cmd cmd = {
@@ -152,15 +145,15 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 	if (ret || (flags & CMD_ASYNC))
 		return ret;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			  res->hdr.flags);
+			  pkt->hdr.flags);
 		ret = -EIO;
 	}
 
 	if (ret == 0) {
-		switch (res->u.add_sta.status) {
+		switch (pkt->u.add_sta.status) {
 		case ADD_STA_SUCCESS_MSK:
 			iwl_sta_ucode_activate(priv, sta->sta.sta_id);
 			IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
@@ -172,8 +165,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 		}
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return ret;
 }
@@ -189,6 +182,11 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
 		goto done;
 
 	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+	IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+			(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+			"static" :
+			(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+			"dynamic" : "disabled");
 
 	sta_flags = priv->stations[index].sta.station_flags;
 
@@ -324,26 +322,19 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
 
 static void iwl_remove_sta_callback(struct iwl_priv *priv,
 				    struct iwl_device_cmd *cmd,
-				    struct sk_buff *skb)
+				    struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *res = NULL;
 	struct iwl_rem_sta_cmd *rm_sta =
-		 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
+			(struct iwl_rem_sta_cmd *)cmd->cmd.payload;
 	const char *addr = rm_sta->addr;
 
-	if (!skb) {
-		IWL_ERR(priv, "Error: Response NULL in REPLY_REMOVE_STA.\n");
-		return;
-	}
-
-	res = (struct iwl_rx_packet *)skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-		res->hdr.flags);
+		pkt->hdr.flags);
 		return;
 	}
 
-	switch (res->u.rem_sta.status) {
+	switch (pkt->u.rem_sta.status) {
 	case REM_STA_SUCCESS_MSK:
 		iwl_sta_ucode_deactivate(priv, addr);
 		break;
@@ -356,7 +347,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
 static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
 				   u8 flags)
 {
-	struct iwl_rx_packet *res = NULL;
+	struct iwl_rx_packet *pkt;
 	int ret;
 
 	struct iwl_rem_sta_cmd rm_sta_cmd;
@@ -381,15 +372,15 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
 	if (ret || (flags & CMD_ASYNC))
 		return ret;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-			  res->hdr.flags);
+			  pkt->hdr.flags);
 		ret = -EIO;
 	}
 
 	if (!ret) {
-		switch (res->u.rem_sta.status) {
+		switch (pkt->u.rem_sta.status) {
 		case REM_STA_SUCCESS_MSK:
 			iwl_sta_ucode_deactivate(priv, addr);
 			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
@@ -401,8 +392,8 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
 		}
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return ret;
 }
@@ -1026,7 +1017,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
 	 */
 	if (priv->current_ht_config.is_ht) {
 		rcu_read_lock();
-		sta = ieee80211_find_sta(priv->hw, addr);
+		sta = ieee80211_find_sta(priv->vif, addr);
 		if (sta) {
 			memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
 			cur_ht_config = &ht_config;
@@ -1044,6 +1035,68 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
 EXPORT_SYMBOL(iwl_rxon_add_station);
 
 /**
+ * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
+{
+	int i, r;
+	struct iwl_link_quality_cmd link_cmd = {
+		.reserved1 = 0,
+	};
+	u32 rate_flags;
+
+	/* Set up the rate scaling to start at selected rate, fall back
+	 * all the way down to 1M in IEEE order, and then spin on 1M */
+	if (priv->band == IEEE80211_BAND_5GHZ)
+		r = IWL_RATE_6M_INDEX;
+	else
+		r = IWL_RATE_1M_INDEX;
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		rate_flags = 0;
+		if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+			rate_flags |= RATE_MCS_CCK_MSK;
+
+		rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+				RATE_MCS_ANT_POS;
+
+		link_cmd.rs_table[i].rate_n_flags =
+			iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+		r = iwl_get_prev_ieee_rate(r);
+	}
+
+	link_cmd.general_params.single_stream_ant_msk =
+				first_antenna(priv->hw_params.valid_tx_ant);
+	link_cmd.general_params.dual_stream_ant_msk = 3;
+	link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+	link_cmd.agg_params.agg_time_limit =
+		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+	/* Update the rate scaling for control frame Tx to AP */
+	link_cmd.sta_id = priv->hw_params.bcast_sta_id;
+
+	iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
+			       sizeof(link_cmd), &link_cmd, NULL);
+}
+
+
+/**
+ * iwl_add_bcast_station - add broadcast station into station table.
+ */
+void iwl_add_bcast_station(struct iwl_priv *priv)
+{
+	iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+
+	/* Set up default rate scaling table in device's station table */
+	iwl_sta_init_bcast_lq(priv);
+}
+EXPORT_SYMBOL(iwl_add_bcast_station);
+
+/**
  * iwl_get_sta_id - Find station's index within station table
  *
  * If new IBSS station, create new entry in station table
@@ -1163,7 +1216,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
 }
 EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
 
-static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
 {
 	unsigned long flags;
 
@@ -1171,27 +1224,26 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
 	priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
 	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
 	priv->stations[sta_id].sta.sta.modify_mask = 0;
+	priv->stations[sta_id].sta.sleep_tx_count = 0;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
 	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 }
+EXPORT_SYMBOL(iwl_sta_modify_ps_wake);
 
-void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
+void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
 {
-	/* FIXME: need locking over ps_status ??? */
-	u8 sta_id = iwl_find_station(priv, addr);
+	unsigned long flags;
 
-	if (sta_id != IWL_INVALID_STATION) {
-		u8 sta_awake = priv->stations[sta_id].
-				ps_status == STA_PS_STATUS_WAKE;
+	spin_lock_irqsave(&priv->sta_lock, flags);
+	priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+	priv->stations[sta_id].sta.sta.modify_mask =
+					STA_MODIFY_SLEEP_TX_COUNT_MSK;
+	priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-		if (sta_awake && ps_bit)
-			priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
-		else if (!sta_awake && !ps_bit) {
-			iwl_sta_modify_ps_wake(priv, sta_id);
-			priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
-		}
-	}
+	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 }
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 6deebade6361..8d052de2d405 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -52,6 +52,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
 			const u8 *addr, u32 iv32, u16 *phase1key);
 
 int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
+void iwl_add_bcast_station(struct iwl_priv *priv);
 int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
 void iwl_clear_stations_table(struct iwl_priv *priv);
 int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
@@ -65,5 +66,6 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
 int iwl_sta_rx_agg_start(struct iwl_priv *priv,
 			 const u8 *addr, int tid, u16 ssn);
 int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
-void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr);
+void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
+void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
 #endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b7e196e3c8d3..58b132f9cf28 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -97,7 +97,8 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
+			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				      txq_id, reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			return ret;
@@ -132,7 +133,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
@@ -142,8 +143,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -181,14 +180,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-	len += IWL_MAX_SCAN_SIZE;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -370,8 +366,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	txq->need_update = 0;
 
-	/* aggregation TX queues will get their ID when aggregation begins */
-	if (txq_id <= IWL_TX_FIFO_AC3)
+	/*
+	 * Aggregation TX queues will get their ID when aggregation begins;
+	 * they overwrite the setting done here. The command FIFO doesn't
+	 * need an swq_id, so leave it unset to help catch errors; all
+	 * other queues can be set up to the identity mapping.
+	 */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
 		txq->swq_id = txq_id;
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -406,15 +407,19 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
-
+	if (priv->txq)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
 	iwl_free_dma_ptr(priv, &priv->kw);
 
 	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
@@ -446,6 +451,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		IWL_ERR(priv, "Keep Warm allocation failed\n");
 		goto error_kw;
 	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
@@ -582,9 +593,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
 	u8 rate_plcp;
 
 	/* Set retry limit on DATA packets and Probe Responses*/
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
-	else if (ieee80211_is_probe_resp(fc))
+	if (ieee80211_is_probe_resp(fc))
 		data_retry_limit = 3;
 	else
 		data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -701,6 +710,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct iwl_station_priv *sta_priv = NULL;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
 	struct iwl_device_cmd *out_cmd;
@@ -710,7 +721,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
-	u16 len, len_org;
+	u16 len, len_org, firstlen, secondlen;
 	u16 seq_number = 0;
 	__le16 fc;
 	u8 hdr_len;
@@ -763,6 +774,24 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+	    sta_priv->asleep) {
+		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+	}
+
 	txq_id = skb_get_queue_mapping(skb);
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
@@ -843,7 +872,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		sizeof(struct iwl_cmd_header) + hdr_len;
 
 	len_org = len;
-	len = (len + 3) & ~3;
+	firstlen = len = (len + 3) & ~3;
 
 	if (len_org != len)
 		len_org = 1;
@@ -877,7 +906,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
+	secondlen = len = skb->len - hdr_len;
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
@@ -911,11 +940,28 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
 				       len, PCI_DMA_BIDIRECTIONAL);
 
+	trace_iwlwifi_dev_tx(priv,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &out_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	ret = iwl_txq_update_write_ptr(priv, txq);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+
+	/* avoid atomic ops if it isn't an associated client */
+	if (sta_priv && sta_priv->client)
+		atomic_inc(&sta_priv->pending_frames);
+
 	if (ret)
 		return ret;
 
@@ -970,13 +1016,20 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
 	       !(cmd->flags & CMD_SIZE_HUGE));
 
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
+	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+		IWL_WARN(priv, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(priv) ? "RF" : "CT");
 		return -EIO;
 	}
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		IWL_ERR(priv, "No space for Tx\n");
+		IWL_ERR(priv, "No space in command queue\n");
+		if (iwl_within_ct_kill_margin(priv))
+			iwl_tt_enter_ct_kill(priv);
+		else {
+			IWL_ERR(priv, "Restarting adapter due to queue full\n");
+			queue_work(priv->workqueue, &priv->restart);
+		}
 		return -ENOSPC;
 	}
 
@@ -1039,6 +1092,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	pci_unmap_addr_set(out_meta, mapping, phys_addr);
 	pci_unmap_len_set(out_meta, len, fix_size);
 
+	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 						   phys_addr, fix_size, 1,
 						   U32_PAD(cmd->len));
@@ -1051,6 +1106,24 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	return ret ? ret : idx;
 }
 
+static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_sta *sta;
+	struct iwl_station_priv *sta_priv;
+
+	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(priv->hw, sta, false);
+	}
+
+	ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
 int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
@@ -1070,7 +1143,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+		iwl_tx_status(priv, tx_info->skb[0]);
 		tx_info->skb[0] = NULL;
 
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
@@ -1105,11 +1178,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 		return;
 	}
 
-	pci_unmap_single(priv->pci_dev,
-		pci_unmap_addr(&txq->meta[cmd_idx], mapping),
-		pci_unmap_len(&txq->meta[cmd_idx], len),
-		PCI_DMA_BIDIRECTIONAL);
-
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
@@ -1132,7 +1200,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
  */
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1157,12 +1225,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
 	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
 
+	pci_unmap_single(priv->pci_dev,
+			 pci_unmap_addr(meta, mapping),
+			 pci_unmap_len(meta, len),
+			 PCI_DMA_BIDIRECTIONAL);
+
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
-		meta->source->reply_skb = rxb->skb;
-		rxb->skb = NULL;
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
 	} else if (meta->callback)
-		meta->callback(priv, cmd, rxb->skb);
+		meta->callback(priv, cmd, pkt);
 
 	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
@@ -1240,7 +1313,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 	if (tid_data->tfds_in_queue == 0) {
 		IWL_DEBUG_HT(priv, "HW queue is empty\n");
 		tid_data->agg.state = IWL_AGG_ON;
-		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 	} else {
 		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
 			     tid_data->tfds_in_queue);
@@ -1313,7 +1386,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 	if (ret)
 		return ret;
 
-	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 
 	return 0;
 }
@@ -1337,7 +1410,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
 			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
 							     ssn, tx_fifo);
 			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
 		}
 		break;
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1345,7 +1418,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
 		if (tid_data->tfds_in_queue == 0) {
 			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
 			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
 		}
 		break;
 	}
@@ -1409,7 +1482,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 
 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
 	memset(&info->status, 0, sizeof(info->status));
-	info->flags = IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_ACK;
 	info->flags |= IEEE80211_TX_STAT_AMPDU;
 	info->status.ampdu_ack_map = successes;
 	info->status.ampdu_ack_len = agg->frame_count;
@@ -1429,7 +1502,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 					   struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_ht_agg *agg;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d00a80334095..2a28a1f8b1fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -42,7 +42,6 @@
 #include <linux/if_arp.h>
 
 #include <net/ieee80211_radiotap.h>
-#include <net/lib80211.h>
 #include <net/mac80211.h>
 
 #include <asm/div64.h>
@@ -77,11 +76,9 @@
 #define VS
 #endif
 
-#define IWL39_VERSION "1.2.26k" VD VS
+#define DRV_VERSION  IWLWIFI_VERSION VD VS
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2009 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
-#define DRV_VERSION     IWL39_VERSION
-
 
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_VERSION(DRV_VERSION);
@@ -90,7 +87,6 @@ MODULE_LICENSE("GPL");
 
  /* module parameters */
 struct iwl_mod_params iwl3945_mod_params = {
-	.num_of_queues = IWL39_NUM_QUEUES, /* Not used */
 	.sw_crypto = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -368,13 +364,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 				      struct sk_buff *skb_frag,
 				      int sta_id)
 {
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
 
 	switch (keyinfo->alg) {
 	case ALG_CCMP:
-		tx->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx->key, keyinfo->key, keyinfo->keylen);
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
@@ -382,13 +378,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 		break;
 
 	case ALG_WEP:
-		tx->sec_ctl = TX_CMD_SEC_WEP |
+		tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
 		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
 
 		if (keyinfo->keylen == 13)
-			tx->sec_ctl |= TX_CMD_SEC_KEY128;
+			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 
-		memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
+		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
 
 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
 			     "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -408,12 +404,11 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 				  struct ieee80211_tx_info *info,
 				  struct ieee80211_hdr *hdr, u8 std_id)
 {
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-	__le32 tx_flags = tx->tx_flags;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	__le32 tx_flags = tx_cmd->tx_flags;
 	__le16 fc = hdr->frame_control;
-	u8 rc_flags = info->control.rates[0].flags;
 
-	tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		tx_flags |= TX_CMD_FLG_ACK_MSK;
 		if (ieee80211_is_mgmt(fc))
@@ -426,25 +421,19 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
-	tx->sta_id = std_id;
+	tx_cmd->sta_id = std_id;
 	if (ieee80211_has_morefrags(fc))
 		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
 
 	if (ieee80211_is_data_qos(fc)) {
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tx->tid_tspec = qc[0] & 0xf;
+		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	} else {
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
-	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-		tx_flags |= TX_CMD_FLG_RTS_MSK;
-		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-	} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-		tx_flags |= TX_CMD_FLG_CTS_MSK;
-	}
+	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
 
 	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
 		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
@@ -452,19 +441,16 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 	if (ieee80211_is_mgmt(fc)) {
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx->timeout.pm_frame_timeout = cpu_to_le16(3);
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
 		else
-			tx->timeout.pm_frame_timeout = cpu_to_le16(2);
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 	} else {
-		tx->timeout.pm_frame_timeout = 0;
-#ifdef CONFIG_IWLWIFI_LEDS
-		priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
-#endif
+		tx_cmd->timeout.pm_frame_timeout = 0;
 	}
 
-	tx->driver_txop = 0;
-	tx->tx_flags = tx_flags;
-	tx->next_frame_len = 0;
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
 }
 
 /*
@@ -474,7 +460,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl3945_tx_cmd *tx;
+	struct iwl3945_tx_cmd *tx_cmd;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_queue *q = NULL;
 	struct iwl_device_cmd *out_cmd;
@@ -573,9 +559,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Init first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
-	tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
+	tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-	memset(tx, 0, sizeof(*tx));
+	memset(tx_cmd, 0, sizeof(*tx_cmd));
 
 	/*
 	 * Set up the Tx-command (not MAC!) header.
@@ -588,7 +574,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 				INDEX_TO_SEQ(q->write_ptr)));
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx->hdr, hdr, hdr_len);
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
 
 	if (info->control.hw_key)
@@ -602,12 +588,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Total # bytes to be transmitted */
 	len = (u16)skb->len;
-	tx->len = cpu_to_le16(len);
+	tx_cmd->len = cpu_to_le16(len);
 
 	iwl_dbg_log_tx_data_frame(priv, len, hdr);
 	iwl_update_stats(priv, true, fc, len);
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -620,9 +606,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
 		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
+	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+	iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
 			   ieee80211_hdrlen(fc));
 
 	/*
@@ -758,7 +744,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 			       u8 type)
 {
 	struct iwl_spectrum_cmd spectrum;
-	struct iwl_rx_packet *res;
+	struct iwl_rx_packet *pkt;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
 		.data = (void *)&spectrum,
@@ -803,18 +789,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 	if (rc)
 		return rc;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
 		rc = -EIO;
 	}
 
-	spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
+	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
 	switch (spectrum_resp_status) {
 	case 0:		/* Command will be handled */
-		if (res->u.spectrum.id != 0xff) {
+		if (pkt->u.spectrum.id != 0xff) {
 			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-						res->u.spectrum.id);
+						pkt->u.spectrum.id);
 			priv->measurement_status &= ~MEASUREMENT_READY;
 		}
 		priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -826,7 +812,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 		break;
 	}
 
-	dev_kfree_skb_any(cmd.reply_skb);
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return rc;
 }
@@ -835,7 +821,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
 			       struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_alive_resp *palive;
 	struct delayed_work *pwork;
 
@@ -872,7 +858,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
 				 struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 #endif
 
 	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -908,7 +894,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
 	u8 rate = beacon->beacon_notify_hdr.rate;
 
@@ -931,7 +917,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 	unsigned long status = priv->status;
 
@@ -1095,7 +1081,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
+		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -1135,8 +1121,9 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -1148,10 +1135,14 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
+			gfp_mask |= __GFP_NOWARN;
+
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
-		if (!skb) {
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
 				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1168,7 +1159,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_lock_irqsave(&rxq->lock, flags);
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -1176,26 +1167,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		list_del(element);
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-
-		/* If radiotap head is required, reserve some headroom here.
-		 * The physical head count is a variable rx_stats->phy_count.
-		 * We reserve 4 bytes here. Plus these extra bytes, the
-		 * headroom of the physical head should be enough for the
-		 * radiotap head that iwl3945 supported. See iwl3945_rt.
-		 */
-		skb_reserve(rxb->skb, 4);
-
+		rxb->page = page;
 		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(priv->pci_dev,
-						rxb->skb->data,
-						priv->hw_params.rx_buf_size,
-						PCI_DMA_FROMDEVICE);
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 
 		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
-		priv->alloc_rxb_skb++;
 		rxq->free_count++;
+		priv->alloc_rxb_page++;
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
 }
@@ -1211,14 +1194,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -1226,8 +1209,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	/* Set us so that we have processed and used all buffers, but have
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
-	rxq->free_count = 0;
 	rxq->write_actual = 0;
+	rxq->free_count = 0;
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
@@ -1260,12 +1243,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -1381,7 +1366,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 	i = rxq->read;
 
 	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - priv->rxq.write_actual;
+	total_empty = r - rxq->write_actual;
 	if (total_empty < 0)
 		total_empty += RX_QUEUE_SIZE;
 
@@ -1401,10 +1386,13 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
-		pkt = (struct iwl_rx_packet *)rxb->skb->data;
+		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << priv->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
+
+		trace_iwlwifi_dev_rx(priv, pkt,
+			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -1422,44 +1410,55 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 		if (priv->rx_handlers[pkt->hdr.cmd]) {
 			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
 				get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 		} else {
 			/* No handling needed */
-			IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n",
+			IWL_DEBUG_RX(priv,
+				"r %d i %d No handler needed for %s, 0x%02x\n",
 				r, i, get_cmd_string(pkt->hdr.cmd),
 				pkt->hdr.cmd);
 		}
 
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt), because some rx_handler might have
+		 * already taken or freed the pages.
+		 */
+
 		if (reclaim) {
-			/* Invoke any callbacks, transfer the skb to caller, and
-			 * fire off the (possibly) blocking iwl_send_cmd()
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking iwl_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxb && rxb->skb)
+			if (rxb->page)
 				iwl_tx_cmd_complete(priv, rxb);
 			else
 				IWL_WARN(priv, "Claim null rxb?\n");
 		}
 
-		/* For now we just don't re-use anything.  We can tweak this
-		 * later to try and re-use notification packets and SKBs that
-		 * fail to Rx correctly */
-		if (rxb->skb != NULL) {
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb_any(rxb->skb);
-			rxb->skb = NULL;
-		}
-
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
 		spin_lock_irqsave(&rxq->lock, flags);
-		list_add_tail(&rxb->list, &priv->rxq.rx_used);
+		if (rxb->page != NULL) {
+			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+				0, PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
+
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
 		 * restock the Rx queue so ucode won't assert. */
 		if (fill_rx) {
 			count++;
 			if (count >= 8) {
-				priv->rxq.read = i;
+				rxq->read = i;
 				iwl3945_rx_replenish_now(priv);
 				count = 0;
 			}
@@ -1467,7 +1466,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 	}
 
 	/* Backtrack one entry */
-	priv->rxq.read = i;
+	rxq->read = i;
 	if (fill_rx)
 		iwl3945_rx_replenish_now(priv);
 	else
@@ -1482,7 +1481,6 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
 	tasklet_kill(&priv->irq_tasklet);
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
 static const char *desc_lookup(int i)
 {
 	switch (i) {
@@ -1551,8 +1549,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
 			"%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
 			desc_lookup(desc), desc, time, blink1, blink2,
 			ilink1, ilink2, data1);
+		trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
+					0, blink1, blink2, ilink1, ilink2);
 	}
-
 }
 
 #define EVENT_START_OFFSET  (6 * sizeof(u32))
@@ -1569,6 +1568,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
 	u32 event_size;	/* 2 u32s, or 3 u32s if timestamp recorded */
 	u32 ptr;        /* SRAM byte address of log data */
 	u32 ev, time, data; /* event log data */
+	unsigned long reg_flags;
 
 	if (num_events == 0)
 		return;
@@ -1582,25 +1582,71 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
 
 	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
 
+	/* Make sure device is powered up for SRAM reads */
+	spin_lock_irqsave(&priv->reg_lock, reg_flags);
+	iwl_grab_nic_access(priv);
+
+	/* Set starting address; reads will auto-increment */
+	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+	rmb();
+
 	/* "time" is actually "data" for mode 0 (no timestamp).
 	 * place event id # at far right for easier visual parsing. */
 	for (i = 0; i < num_events; i++) {
-		ev = iwl_read_targ_mem(priv, ptr);
-		ptr += sizeof(u32);
-		time = iwl_read_targ_mem(priv, ptr);
-		ptr += sizeof(u32);
+		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 		if (mode == 0) {
 			/* data, ev */
 			IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+			trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
 		} else {
-			data = iwl_read_targ_mem(priv, ptr);
-			ptr += sizeof(u32);
+			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 			IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
+			trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
 		}
 	}
+
+	/* Allow device to power down */
+	iwl_release_nic_access(priv);
+	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
 }
 
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
+/**
+ * iwl3945_print_last_event_logs - Dump the newest # of event log entries to syslog
+ */
+static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+				      u32 num_wraps, u32 next_entry,
+				      u32 size, u32 mode)
+{
+	/*
+	 * display the newest DEFAULT_LOG_ENTRIES entries
+	 * i.e. the entries just before the next one that uCode would fill.
+	 */
+	if (num_wraps) {
+		if (next_entry < size) {
+			iwl3945_print_event_log(priv,
+					capacity - (size - next_entry),
+					size - next_entry, mode);
+			iwl3945_print_event_log(priv, 0,
+				    next_entry, mode);
+		} else
+			iwl3945_print_event_log(priv, next_entry - size,
+				    size, mode);
+	} else {
+		if (next_entry < size)
+			iwl3945_print_event_log(priv, 0, next_entry, mode);
+		else
+			iwl3945_print_event_log(priv, next_entry - size,
+					    size, mode);
+	}
+}
+
+/* For sanity check only.  Actual size is determined by uCode, typ. 512 */
+#define IWL3945_MAX_EVENT_LOG_SIZE (512)
+
+#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
+
+void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
 {
 	u32 base;       /* SRAM byte address of event log header */
 	u32 capacity;   /* event log capacity in # entries */
@@ -1621,6 +1667,18 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
 	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
 	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
 
+	if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
+		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
+			capacity, IWL3945_MAX_EVENT_LOG_SIZE);
+		capacity = IWL3945_MAX_EVENT_LOG_SIZE;
+	}
+
+	if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
+		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
+			next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
+		next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
+	}
+
 	size = num_wraps ? capacity : next_entry;
 
 	/* bail out if nothing in log */
@@ -1629,30 +1687,40 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
 		return;
 	}
 
-	IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
-		  size, num_wraps);
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+		size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
+			? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+	size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
+		? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+
+	IWL_ERR(priv, "Start IWL Event Log Dump: display last %d entries\n",
+		  size);
 
-	/* if uCode has wrapped back to top of log, start at the oldest entry,
-	 * i.e the next one that uCode would fill. */
-	if (num_wraps)
-		iwl3945_print_event_log(priv, next_entry,
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+		/* if uCode has wrapped back to top of log,
+		 * start at the oldest entry,
+		 * i.e the next one that uCode would fill.
+		 */
+		if (num_wraps)
+			iwl3945_print_event_log(priv, next_entry,
 				    capacity - next_entry, mode);
 
-	/* (then/else) start at top of log */
-	iwl3945_print_event_log(priv, 0, next_entry, mode);
-
-}
+		/* (then/else) start at top of log */
+		iwl3945_print_event_log(priv, 0, next_entry, mode);
+	} else
+		iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+					next_entry, size, mode);
 #else
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
-{
-}
+	iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+				next_entry, size, mode);
+#endif
 
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
 }
 
-#endif
-
 static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 {
 	u32 inta, handled = 0;
@@ -1685,6 +1753,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 	}
 #endif
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
 	 * atomic, make sure that inta covers all the interrupts that
 	 * we've discovered, even if FH interrupt came in just after
@@ -1706,8 +1776,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1799,7 +1867,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
 	}
 #endif
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -2158,6 +2225,14 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
 		IWL_UCODE_API(priv->ucode_ver),
 		IWL_UCODE_SERIAL(priv->ucode_ver));
 
+	snprintf(priv->hw->wiphy->fw_version,
+		 sizeof(priv->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u",
+		 IWL_UCODE_MAJOR(priv->ucode_ver),
+		 IWL_UCODE_MINOR(priv->ucode_ver),
+		 IWL_UCODE_API(priv->ucode_ver),
+		 IWL_UCODE_SERIAL(priv->ucode_ver));
+
 	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
 		       priv->ucode_ver);
 	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
@@ -2458,7 +2533,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
 	priv->active_rate = priv->rates_mask;
 	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
 
-	iwl_power_update_mode(priv, false);
+	iwl_power_update_mode(priv, true);
 
 	if (iwl_is_associated(priv)) {
 		struct iwl3945_rxon_cmd *active_rxon =
@@ -2479,7 +2554,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
 
 	iwl3945_reg_txpower_periodic(priv);
 
-	iwl3945_led_register(priv);
+	iwl_leds_init(priv);
 
 	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
 	set_bit(STATUS_READY, &priv->status);
@@ -2517,7 +2592,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
 	if (!exit_pending)
 		set_bit(STATUS_EXIT_PENDING, &priv->status);
 
-	iwl3945_led_unregister(priv);
 	iwl_clear_stations_table(priv);
 
 	/* Unblock any waiting calls */
@@ -2563,23 +2637,15 @@ static void __iwl3945_down(struct iwl_priv *priv)
 			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
 				STATUS_EXIT_PENDING;
 
-	priv->cfg->ops->lib->apm_ops.reset(priv);
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	iwl3945_hw_txq_ctx_stop(priv);
 	iwl3945_hw_rxq_stop(priv);
 
-	iwl_write_prph(priv, APMG_CLK_DIS_REG,
-				APMG_CLK_VAL_DMA_CLK_RQT);
-
+	/* Power-down device's busmaster DMA clocks */
+	iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
 	udelay(5);
 
-	if (exit_pending)
-		priv->cfg->ops->lib->apm_ops.stop(priv);
-	else
-		priv->cfg->ops->lib->apm_ops.reset(priv);
+	/* Stop the device, and put it in low power state */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
 
  exit:
 	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2724,19 +2790,34 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
 	mutex_unlock(&priv->mutex);
 }
 
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change.  This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
 static void iwl3945_rfkill_poll(struct work_struct *data)
 {
 	struct iwl_priv *priv =
 	    container_of(data, struct iwl_priv, rfkill_poll.work);
+	bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
+	bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
+			& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 
-	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	if (new_rfkill != old_rfkill) {
+		if (new_rfkill)
+			set_bit(STATUS_RF_KILL_HW, &priv->status);
+		else
+			clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
 
-	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-			test_bit(STATUS_RF_KILL_HW, &priv->status));
+		IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
+				new_rfkill ? "disable radio" : "enable radio");
+	}
 
+	/* Keep this running, even if radio now enabled.  This will be
+	 * cancelled in mac_start() if system decides to start again */
 	queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
 			   round_jiffies_relative(2 * HZ));
 
@@ -3152,6 +3233,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
 	 * no need to poll the killswitch state anymore */
 	cancel_delayed_work(&priv->rfkill_poll);
 
+	iwl_led_start(priv);
+
 	priv->is_open = 1;
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 	return 0;
@@ -3606,7 +3689,7 @@ static ssize_t show_statistics(struct device *d,
 		return -EAGAIN;
 
 	mutex_lock(&priv->mutex);
-	rc = iwl_send_statistics_request(priv, 0);
+	rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
 	if (rc) {
@@ -3795,7 +3878,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
 	/* Clear the driver's (not device's) station table */
 	iwl_clear_stations_table(priv);
 
-	priv->data_retry_limit = -1;
 	priv->ieee_channels = NULL;
 	priv->ieee_rates = NULL;
 	priv->band = IEEE80211_BAND_2GHZ;
@@ -3862,10 +3944,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->custom_regulatory = true;
-
-	/* Firmware does not support this */
-	hw->wiphy->disable_beacon_hints = true;
+	hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+			    WIPHY_FLAG_DISABLE_BEACON_HINTS;
 
 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
 	/* we create the 802.11 header and a zero-length SSID element */
@@ -3982,13 +4062,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	 */
 	spin_lock_init(&priv->reg_lock);
 
-	/* amp init */
-	err = priv->cfg->ops->lib->apm_ops.init(priv);
-	if (err < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out_iounmap;
-	}
-
 	/***********************
 	 * 4. Read EEPROM
 	 * ********************/
@@ -4054,6 +4127,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 			     &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
 	iwl3945_setup_deferred_work(priv);
 	iwl3945_setup_rx_handlers(priv);
+	iwl_power_initialize(priv);
 
 	/*********************************
 	 * 8. Setup and Register mac80211
@@ -4124,6 +4198,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
 		iwl3945_down(priv);
 	}
 
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with iwl_down(), but there are paths to
+	 * run iwl_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running iwl_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
 	/* make sure we flush any pending irq or
 	 * tasklet for the driver
 	 */
@@ -4226,18 +4309,19 @@ static void __exit iwl3945_exit(void)
 
 MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
 
-module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
+module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
+module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
 MODULE_PARM_DESC(swcrypto,
 		 "using software crypto (default 1 [software])\n");
 #ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, 0644);
+module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "debug output mask");
 #endif
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
+module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
+		   int, S_IRUGO);
 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
-module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, 0444);
+module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
 
 module_exit(iwl3945_exit);
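
The permission arguments above only swap raw octal literals for the equivalent symbolic macros; the resulting file modes are unchanged. A stand-alone sketch (ordinary user-space C, with S_IRUGO defined the way the kernel headers do) that prints the modes involved:

/* Illustrative only: shows the octal values behind the symbolic modes. */
#include <stdio.h>
#include <sys/stat.h>

#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)	/* kernel definition */
#endif

int main(void)
{
	printf("S_IRUGO           = %04o\n", S_IRUGO);			/* 0444 */
	printf("S_IRUGO | S_IWUSR = %04o\n", S_IRUGO | S_IWUSR);	/* 0644 */
	return 0;
}
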
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index c25a04371ca8..b9d34a766964 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,8 +1,9 @@
 config IWM
 	tristate "Intel Wireless Multicomm 3200 WiFi driver"
-	depends on MMC && WLAN_80211 && EXPERIMENTAL
+	depends on MMC && EXPERIMENTAL
 	depends on CFG80211
 	select FW_LOADER
+	select IWMC3200TOP
 	help
 	  The Intel Wireless Multicomm 3200 hardware is a combo
 	  card with GPS, Bluetooth, WiMax and 802.11 radios. It
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index f3c55658225b..7c4f44a9c3e6 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -405,39 +405,21 @@ static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
 {
 	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
 	struct ieee80211_channel *chan = params->channel;
-	struct cfg80211_bss *bss;
 
 	if (!test_bit(IWM_STATUS_READY, &iwm->status))
 		return -EIO;
 
-	/* UMAC doesn't support creating IBSS network with specified bssid.
-	 * This should be removed after we have join only mode supported. */
+	/* UMAC doesn't support creating or joining an IBSS network
+	 * with specified bssid. */
 	if (params->bssid)
 		return -EOPNOTSUPP;
 
-	bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
-				params->ssid, params->ssid_len);
-	if (!bss) {
-		iwm_scan_one_ssid(iwm, params->ssid, params->ssid_len);
-		schedule_timeout_interruptible(2 * HZ);
-		bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
-					params->ssid, params->ssid_len);
-	}
-	/* IBSS join only mode is not supported by UMAC ATM */
-	if (bss) {
-		cfg80211_put_bss(bss);
-		return -EOPNOTSUPP;
-	}
-
 	iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
 	iwm->umac_profile->ibss.band = chan->band;
 	iwm->umac_profile->ibss.channel = iwm->channel;
 	iwm->umac_profile->ssid.ssid_len = params->ssid_len;
 	memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
 
-	if (params->bssid)
-		memcpy(&iwm->umac_profile->bssid[0], params->bssid, ETH_ALEN);
-
 	return iwm_send_mlme_profile(iwm);
 }
 
@@ -490,12 +472,12 @@ static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
 		return 0;
 	}
 
+	if (wpa_version & NL80211_WPA_VERSION_1)
+		iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
+
 	if (wpa_version & NL80211_WPA_VERSION_2)
 		iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
 
-	if (wpa_version & NL80211_WPA_VERSION_1)
-		iwm->umac_profile->sec.flags |= UMAC_SEC_FLG_WPA_ON_MSK;
-
 	return 0;
 }
 
@@ -646,6 +628,13 @@ static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
 		iwm->default_key = sme->key_idx;
 	}
 
+	/* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
+	if ((iwm->umac_profile->sec.flags &
+	     (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
+	    iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
+			iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
+	}
+
 	ret = iwm_send_mlme_profile(iwm);
 
 	if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
@@ -682,10 +671,24 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
 static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
 				    enum tx_power_setting type, int dbm)
 {
+	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+	int ret;
+
 	switch (type) {
 	case TX_POWER_AUTOMATIC:
 		return 0;
+	case TX_POWER_FIXED:
+		if (!test_bit(IWM_STATUS_READY, &iwm->status))
+			return 0;
+
+		ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+					      CFG_TX_PWR_LIMIT_USR, dbm * 2);
+		if (ret < 0)
+			return ret;
+
+		return iwm_tx_power_trigger(iwm);
 	default:
+		IWM_ERR(iwm, "Unsupported power type: %d\n", type);
 		return -EOPNOTSUPP;
 	}
 
@@ -696,7 +699,7 @@ static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
 {
 	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
 
-	*dbm = iwm->txpower;
+	*dbm = iwm->txpower >> 1;
 
 	return 0;
 }
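
Taken together, the set and get paths suggest the UMAC keeps iwm->txpower in half-dBm steps: the set path sends dbm * 2 through CFG_TX_PWR_LIMIT_USR and the get path reports txpower >> 1. A minimal sketch of that conversion (the helper names are illustrative, not part of the driver):

/* Illustrative only: mirrors the dBm <-> half-dBm conversion used above. */
#include <stdio.h>

static int dbm_to_halfdbm(int dbm)	{ return dbm * 2; }
static int halfdbm_to_dbm(int halfdbm)	{ return halfdbm >> 1; }

int main(void)
{
	int dbm = 15;
	int stored = dbm_to_halfdbm(dbm);	/* what the UMAC would be told */

	printf("%d dBm -> %d half-dBm -> %d dBm\n",
	       dbm, stored, halfdbm_to_dbm(stored));
	return 0;
}
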
@@ -722,6 +725,33 @@ static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
 				       CFG_POWER_INDEX, iwm->conf.power_index);
 }
 
+int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+			   struct cfg80211_pmksa *pmksa)
+{
+	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+
+	return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
+}
+
+int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+			   struct cfg80211_pmksa *pmksa)
+{
+	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+
+	return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
+}
+
+int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+	struct cfg80211_pmksa pmksa;
+
+	memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
+
+	return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH);
+}
+
+
 static struct cfg80211_ops iwm_cfg80211_ops = {
 	.change_virtual_intf = iwm_cfg80211_change_iface,
 	.add_key = iwm_cfg80211_add_key,
@@ -738,6 +768,9 @@ static struct cfg80211_ops iwm_cfg80211_ops = {
 	.set_tx_power = iwm_cfg80211_set_txpower,
 	.get_tx_power = iwm_cfg80211_get_txpower,
 	.set_power_mgmt = iwm_cfg80211_set_power_mgmt,
+	.set_pmksa = iwm_cfg80211_set_pmksa,
+	.del_pmksa = iwm_cfg80211_del_pmksa,
+	.flush_pmksa = iwm_cfg80211_flush_pmksa,
 };
 
 static const u32 cipher_suites[] = {
@@ -783,6 +816,7 @@ struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
 
 	set_wiphy_dev(wdev->wiphy, dev);
 	wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
+	wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS;
 	wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				       BIT(NL80211_IFTYPE_ADHOC);
 	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 84158b6d35d8..777584d76a88 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -77,6 +77,11 @@ int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
 	int ret;
 	u8 oid = hdr->oid;
 
+	if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
+		IWM_ERR(iwm, "Interface is not ready yet");
+		return -EAGAIN;
+	}
+
 	umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
 	umac_cmd.resp = resp;
 
@@ -94,6 +99,10 @@ int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
 	return ret;
 }
 
+static int modparam_wiwi = COEX_MODE_CM;
+module_param_named(wiwi, modparam_wiwi, int, 0644);
+MODULE_PARM_DESC(wiwi, "Wifi-WiMAX coexistence: 1=SA, 2=XOR, 3=CM (default)");
+
 static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] =
 {
 	{4, 3, 0, COEX_UNASSOC_IDLE_FLAGS},
@@ -117,18 +126,18 @@ static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] =
 static struct coex_event iwm_sta_cm_prio_tbl[COEX_EVENTS_NUM] =
 {
 	{1, 1, 0, COEX_UNASSOC_IDLE_FLAGS},
-	{4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+	{4, 4, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
 	{3, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
-	{5, 5, 0, COEX_CALIBRATION_FLAGS},
+	{6, 6, 0, COEX_CALIBRATION_FLAGS},
 	{3, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
-	{5, 4, 0, COEX_CONNECTION_ESTAB_FLAGS},
+	{6, 5, 0, COEX_CONNECTION_ESTAB_FLAGS},
 	{4, 4, 0, COEX_ASSOCIATED_IDLE_FLAGS},
 	{4, 4, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
 	{4, 4, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
 	{4, 4, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
 	{1, 1, 0, COEX_RF_ON_FLAGS},
 	{1, 1, 0, COEX_RF_OFF_FLAGS},
-	{6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+	{7, 7, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
 	{5, 4, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
 	{1, 1, 0, COEX_RSRVD1_FLAGS},
 	{1, 1, 0, COEX_RSRVD2_FLAGS}
@@ -143,7 +152,7 @@ int iwm_send_prio_table(struct iwm_priv *iwm)
 
 	coex_table_cmd.flags = COEX_FLAGS_STA_TABLE_VALID_MSK;
 
-	switch (iwm->conf.coexist_mode) {
+	switch (modparam_wiwi) {
 	case COEX_MODE_XOR:
 	case COEX_MODE_CM:
 		coex_enabled = 1;
@@ -168,7 +177,7 @@ int iwm_send_prio_table(struct iwm_priv *iwm)
 					COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK |
 					COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK;
 
-		switch (iwm->conf.coexist_mode) {
+		switch (modparam_wiwi) {
 		case COEX_MODE_XOR:
 			memcpy(coex_table_cmd.sta_prio, iwm_sta_xor_prio_tbl,
 			       sizeof(iwm_sta_xor_prio_tbl));
@@ -179,7 +188,7 @@ int iwm_send_prio_table(struct iwm_priv *iwm)
 			break;
 		default:
 			IWM_ERR(iwm, "Invalid coex_mode 0x%x\n",
-				iwm->conf.coexist_mode);
+				modparam_wiwi);
 			break;
 		}
 	} else
@@ -187,7 +196,7 @@ int iwm_send_prio_table(struct iwm_priv *iwm)
 
 	return iwm_send_lmac_ptrough_cmd(iwm, COEX_PRIORITY_TABLE_CMD,
 				&coex_table_cmd,
-				sizeof(struct iwm_coex_prio_table_cmd), 1);
+				sizeof(struct iwm_coex_prio_table_cmd), 0);
 }
 
 int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
@@ -275,6 +284,17 @@ int iwm_send_calib_results(struct iwm_priv *iwm)
 	return ret;
 }
 
+int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
+{
+	struct iwm_ct_kill_cfg_cmd cmd;
+
+	cmd.entry_threshold = entry;
+	cmd.exit_threshold = exit;
+
+	return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
+					 sizeof(struct iwm_ct_kill_cfg_cmd), 0);
+}
+
 int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
 {
 	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
@@ -380,7 +400,7 @@ int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags)
 		return ret;
 
 	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
-				      CFG_COEX_MODE, iwm->conf.coexist_mode);
+				      CFG_COEX_MODE, modparam_wiwi);
 	if (ret < 0)
 		return ret;
 
@@ -778,11 +798,24 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
 		return ret;
 
 	ret = wait_event_interruptible_timeout(iwm->mlme_queue,
-				(iwm->umac_profile_active == 0), 2 * HZ);
+				(iwm->umac_profile_active == 0), 5 * HZ);
 
 	return ret ? 0 : -EBUSY;
 }
 
+int iwm_tx_power_trigger(struct iwm_priv *iwm)
+{
+	struct iwm_umac_pwr_trigger pwr_trigger;
+
+	pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
+	pwr_trigger.hdr.buf_size =
+		cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
+			    sizeof(struct iwm_umac_wifi_if));
+
+
+	return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
+}
+
 int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
 {
 	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
@@ -900,3 +933,58 @@ int iwm_target_reset(struct iwm_priv *iwm)
 
 	return iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
 }
+
+int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
+				 struct iwm_umac_notif_stop_resume_tx *ntf)
+{
+	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+	struct iwm_umac_cmd umac_cmd;
+	struct iwm_umac_cmd_stop_resume_tx stp_res_cmd;
+	struct iwm_sta_info *sta_info;
+	u8 sta_id = STA_ID_N_COLOR_ID(ntf->sta_id);
+	int i;
+
+	sta_info = &iwm->sta_table[sta_id];
+	if (!sta_info->valid) {
+		IWM_ERR(iwm, "Invalid STA: %d\n", sta_id);
+		return -EINVAL;
+	}
+
+	umac_cmd.id = UMAC_CMD_OPCODE_STOP_RESUME_STA_TX;
+	umac_cmd.resp = 0;
+
+	stp_res_cmd.flags = ntf->flags;
+	stp_res_cmd.sta_id = ntf->sta_id;
+	stp_res_cmd.stop_resume_tid_msk = ntf->stop_resume_tid_msk;
+	for (i = 0; i < IWM_UMAC_TID_NR; i++)
+		stp_res_cmd.last_seq_num[i] =
+			sta_info->tid_info[i].last_seq_num;
+
+	return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stp_res_cmd,
+				 sizeof(struct iwm_umac_cmd_stop_resume_tx));
+
+}
+
+int iwm_send_pmkid_update(struct iwm_priv *iwm,
+			  struct cfg80211_pmksa *pmksa, u32 command)
+{
+	struct iwm_umac_pmkid_update update;
+	int ret;
+
+	memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
+
+	update.command = cpu_to_le32(command);
+	if (pmksa->bssid)
+		memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
+	if (pmksa->pmkid)
+		memcpy(&update.pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+
+	ret = iwm_send_wifi_if_cmd(iwm, &update,
+				   sizeof(struct iwm_umac_pmkid_update), 0);
+	if (ret) {
+		IWM_ERR(iwm, "PMKID update command failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index e24d5b633997..06af0552cd75 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -102,7 +102,6 @@ enum {
 	CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
 	CFG_TLC_SUPPORTED_TX_HT_RATES,
 	CFG_TLC_SUPPORTED_TX_RATES,
-	CFG_TLC_VALID_ANTENNA,
 	CFG_TLC_SPATIAL_STREAM_SUPPORTED,
 	CFG_TLC_RETRY_PER_RATE,
 	CFG_TLC_RETRY_PER_HT_RATE,
@@ -136,6 +135,10 @@ enum {
 	CFG_TLC_RENEW_ADDBA_DELAY,
 	CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
 	CFG_TLC_IS_STABLE_IN_HT,
+	CFG_TLC_SR_SIC_1ST_FAIL,
+	CFG_TLC_SR_SIC_1ST_PASS,
+	CFG_TLC_SR_SIC_TOTAL_FAIL,
+	CFG_TLC_SR_SIC_TOTAL_PASS,
 	CFG_RLC_CHAIN_CTRL,
 	CFG_TRK_TABLE_OP_MODE,
 	CFG_TRK_TABLE_RSSI_THRESHOLD,
@@ -147,6 +150,58 @@ enum {
 	CFG_MLME_DBG_NOTIF_BLOCK,
 	CFG_BT_OFF_BECONS_INTERVALS,
 	CFG_BT_FRAG_DURATION,
+	CFG_ACTIVE_CHAINS,
+	CFG_CALIB_CTRL,
+	CFG_CAPABILITY_SUPPORTED_HT_RATES,
+	CFG_HT_MAC_PARAM_INFO,
+	CFG_MIMO_PS_MODE,
+	CFG_HT_DEFAULT_CAPABILIES_INFO,
+	CFG_LED_SC_RESOLUTION_FACTOR,
+	CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
+	CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
+	CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
+	CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
+	CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
+	CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
+	CFG_PTAM_LINK_SENS_FA_CCK_MAX,
+	CFG_PTAM_LINK_SENS_FA_CCK_MIN,
+	CFG_PTAM_LINK_SENS_NRG_DIFF,
+	CFG_PTAM_LINK_SENS_NRG_MARGIN,
+	CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
+	CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
+	CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
+	CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
+	CFG_AGG_MGG_ADDBA_BUF_SIZE,
+	CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
+	CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
+	CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
+	CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
+	CFG_11D_ENABLED,
+	CFG_11H_FEATURE_FLAGS,
 
 	/* <-- LAST --> */
 	CFG_TBL_FIX_LAST
@@ -155,7 +210,8 @@ enum {
 /* variable size table */
 enum {
 	CFG_NET_ADDR = 0,
-	CFG_PROFILE,
+	CFG_LED_PATTERN_TABLE,
+
 	/* <-- LAST --> */
 	CFG_TBL_VAR_LAST
 };
@@ -288,6 +344,9 @@ struct iwm_umac_cmd_scan_request {
 /* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
 #define UMAC_SEC_FLG_WSC_ON_POS		2
 #define UMAC_SEC_FLG_WSC_ON_SEED	1
+#define UMAC_SEC_FLG_WSC_ON_MSK         (UMAC_SEC_FLG_WSC_ON_SEED << \
+					 UMAC_SEC_FLG_WSC_ON_POS)
+
 
 /* Legacy profile can use only WEP40 and WEP104 for encryption and
  * OPEN or PSK for authentication */
@@ -382,10 +441,34 @@ struct iwm_umac_tx_key_id {
 	u8 reserved[3];
 } __attribute__ ((packed));
 
+struct iwm_umac_pwr_trigger {
+	struct iwm_umac_wifi_if hdr;
+	__le32 reseved;
+} __attribute__ ((packed));
+
 struct iwm_umac_cmd_stats_req {
 	__le32 flags;
 } __attribute__ ((packed));
 
+struct iwm_umac_cmd_stop_resume_tx {
+	u8 flags;
+	u8 sta_id;
+	__le16 stop_resume_tid_msk;
+	__le16 last_seq_num[IWM_UMAC_TID_NR];
+	u16 reserved;
+} __attribute__ ((packed));
+
+#define IWM_CMD_PMKID_ADD   1
+#define IWM_CMD_PMKID_DEL   2
+#define IWM_CMD_PMKID_FLUSH 3
+
+struct iwm_umac_pmkid_update {
+	__le32 command;
+	u8 bssid[ETH_ALEN];
+	__le16 reserved;
+	u8 pmkid[WLAN_PMKID_LEN];
+} __attribute__ ((packed));
+
 /* LMAC commands */
 int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
 int iwm_send_prio_table(struct iwm_priv *iwm);
@@ -393,6 +476,7 @@ int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
 int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
 int iwm_send_calib_results(struct iwm_priv *iwm);
 int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
+int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
 
 /* UMAC commands */
 int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
@@ -407,11 +491,16 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
 int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
 int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
 int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
+int iwm_tx_power_trigger(struct iwm_priv *iwm);
 int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
 int iwm_send_umac_channel_list(struct iwm_priv *iwm);
 int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
 		   int ssid_num);
 int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len);
+int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
+				 struct iwm_umac_notif_stop_resume_tx *ntf);
+int iwm_send_pmkid_update(struct iwm_priv *iwm,
+			  struct cfg80211_pmksa *pmksa, u32 command);
 
 /* UDMA commands */
 int iwm_target_reset(struct iwm_priv *iwm);
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index 1465379f900a..be992ca41cf1 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -158,6 +158,29 @@ static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer,
 		}
 
 		spin_unlock_irqrestore(&txq->queue.lock, flags);
+
+		spin_lock_irqsave(&txq->stopped_queue.lock, flags);
+
+		len += snprintf(buf + len, buf_len - len,
+				"\tStopped Queue len:   %d\n",
+				skb_queue_len(&txq->stopped_queue));
+		for (j = 0; j < skb_queue_len(&txq->stopped_queue); j++) {
+			struct iwm_tx_info *tx_info;
+
+			skb = skb->next;
+			tx_info = skb_to_tx_info(skb);
+
+			len += snprintf(buf + len, buf_len - len,
+					"\tSKB #%d\n", j);
+			len += snprintf(buf + len, buf_len - len,
+					"\t\tsta:   %d\n", tx_info->sta);
+			len += snprintf(buf + len, buf_len - len,
+					"\t\tcolor: %d\n", tx_info->color);
+			len += snprintf(buf + len, buf_len - len,
+					"\t\ttid:   %d\n", tx_info->tid);
+		}
+
+		spin_unlock_irqrestore(&txq->stopped_queue.lock, flags);
 	}
 
 	ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c
index 365910fbe01e..8091421ee5e5 100644
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.c
+++ b/drivers/net/wireless/iwmc3200wifi/eeprom.c
@@ -66,6 +66,10 @@ static struct iwm_eeprom_entry eeprom_map[] = {
 	[IWM_EEPROM_SKU_CAP] =
 	{"SKU capabilities", IWM_EEPROM_SKU_CAP_OFF, IWM_EEPROM_SKU_CAP_LEN},
 
+	[IWM_EEPROM_FAT_CHANNELS_CAP] =
+	{"HT channels capabilities", IWM_EEPROM_FAT_CHANNELS_CAP_OFF,
+	 IWM_EEPROM_FAT_CHANNELS_CAP_LEN},
+
 	[IWM_EEPROM_CALIB_RXIQ_OFFSET] =
 	{"RX IQ offset", IWM_EEPROM_CALIB_RXIQ_OFF, IWM_EEPROM_INDIRECT_LEN},
 
@@ -146,6 +150,52 @@ u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id)
 	return iwm->eeprom + eeprom_map[eeprom_id].offset;
 }
 
+int iwm_eeprom_fat_channels(struct iwm_priv *iwm)
+{
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
+	struct ieee80211_supported_band *band;
+	u16 *channels, i;
+
+	channels = (u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_FAT_CHANNELS_CAP);
+	if (IS_ERR(channels))
+		return PTR_ERR(channels);
+
+	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	band->ht_cap.ht_supported = true;
+
+	for (i = 0; i < IWM_EEPROM_FAT_CHANNELS_24; i++)
+		if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
+			band->ht_cap.ht_supported = false;
+
+	band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	band->ht_cap.ht_supported = true;
+	for (i = IWM_EEPROM_FAT_CHANNELS_24; i < IWM_EEPROM_FAT_CHANNELS; i++)
+		if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
+			band->ht_cap.ht_supported = false;
+
+	return 0;
+}
+
+u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm)
+{
+	u16 sku_cap;
+	u32 wireless_mode = 0;
+
+	sku_cap = *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP));
+
+	if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_24GHZ)
+		wireless_mode |= WIRELESS_MODE_11G;
+
+	if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_52GHZ)
+		wireless_mode |= WIRELESS_MODE_11A;
+
+	if (sku_cap & IWM_EEPROM_SKU_CAP_11N_ENABLE)
+		wireless_mode |= WIRELESS_MODE_11N;
+
+	return wireless_mode;
+}
+
+
 int iwm_eeprom_init(struct iwm_priv *iwm)
 {
 	int i, ret = 0;
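
The two helpers added above turn raw EEPROM words into driver capabilities: iwm_eeprom_fat_channels() keeps HT support only if every FAT primary channel has its enable bit set, and iwm_eeprom_wireless_mode() maps the SKU capability bits onto WIRELESS_MODE_* flags. A small stand-alone sketch of the same bit tests, using made-up sample values:

/* Illustrative decode of a SKU capability word and one FAT channel word. */
#include <stdio.h>
#include <stdint.h>

#define SKU_CAP_BAND_24GHZ	(1 << 4)
#define SKU_CAP_BAND_52GHZ	(1 << 5)
#define SKU_CAP_11N_ENABLE	(1 << 6)
#define FAT_CHANNEL_ENABLED	(1 << 0)

int main(void)
{
	uint16_t sku_cap = SKU_CAP_BAND_24GHZ | SKU_CAP_11N_ENABLE;	/* sample */
	uint16_t fat_chan = 0x0001;					/* sample */

	printf("11g: %d  11a: %d  11n: %d\n",
	       !!(sku_cap & SKU_CAP_BAND_24GHZ),
	       !!(sku_cap & SKU_CAP_BAND_52GHZ),
	       !!(sku_cap & SKU_CAP_11N_ENABLE));
	printf("FAT channel enabled: %d\n", !!(fat_chan & FAT_CHANNEL_ENABLED));
	return 0;
}
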
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.h b/drivers/net/wireless/iwmc3200wifi/eeprom.h
index cdb31a6a1f5f..4e3a3fdab0d3 100644
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.h
+++ b/drivers/net/wireless/iwmc3200wifi/eeprom.h
@@ -48,6 +48,7 @@ enum {
 	IWM_EEPROM_CARD_ID,
 	IWM_EEPROM_RADIO_CONF,
 	IWM_EEPROM_SKU_CAP,
+	IWM_EEPROM_FAT_CHANNELS_CAP,
 
 	IWM_EEPROM_INDIRECT_OFFSET,
 	IWM_EEPROM_CALIB_RXIQ_OFFSET = IWM_EEPROM_INDIRECT_OFFSET,
@@ -58,14 +59,15 @@ enum {
 	IWM_EEPROM_LAST,
 };
 
-#define IWM_EEPROM_SIG_OFF             0x00
-#define IWM_EEPROM_VERSION_OFF        (0x54 << 1)
-#define IWM_EEPROM_OEM_HW_VERSION_OFF (0x56 << 1)
-#define IWM_EEPROM_MAC_VERSION_OFF    (0x30 << 1)
-#define IWM_EEPROM_CARD_ID_OFF        (0x5d << 1)
-#define IWM_EEPROM_RADIO_CONF_OFF     (0x58 << 1)
-#define IWM_EEPROM_SKU_CAP_OFF        (0x55 << 1)
-#define IWM_EEPROM_CALIB_CONFIG_OFF   (0x7c << 1)
+#define IWM_EEPROM_SIG_OFF                 0x00
+#define IWM_EEPROM_VERSION_OFF            (0x54 << 1)
+#define IWM_EEPROM_OEM_HW_VERSION_OFF     (0x56 << 1)
+#define IWM_EEPROM_MAC_VERSION_OFF        (0x30 << 1)
+#define IWM_EEPROM_CARD_ID_OFF            (0x5d << 1)
+#define IWM_EEPROM_RADIO_CONF_OFF         (0x58 << 1)
+#define IWM_EEPROM_SKU_CAP_OFF            (0x55 << 1)
+#define IWM_EEPROM_CALIB_CONFIG_OFF       (0x7c << 1)
+#define IWM_EEPROM_FAT_CHANNELS_CAP_OFF   (0xde << 1)
 
 #define IWM_EEPROM_SIG_LEN              4
 #define IWM_EEPROM_VERSION_LEN          2
@@ -74,6 +76,7 @@ enum {
 #define IWM_EEPROM_CARD_ID_LEN          2
 #define IWM_EEPROM_RADIO_CONF_LEN       2
 #define IWM_EEPROM_SKU_CAP_LEN          2
+#define IWM_EEPROM_FAT_CHANNELS_CAP_LEN 40
 #define IWM_EEPROM_INDIRECT_LEN		2
 
 #define IWM_MAX_EEPROM_DATA_LEN         240
@@ -87,6 +90,14 @@ enum {
 #define IWM_EEPROM_SKU_CAP_BAND_52GHZ           (1 << 5)
 #define IWM_EEPROM_SKU_CAP_11N_ENABLE           (1 << 6)
 
+#define IWM_EEPROM_FAT_CHANNELS 20
+/* 2.4 GHz FAT primary channels: 1, 2, 3, 4, 5, 6, 7, 8, 9 */
+#define IWM_EEPROM_FAT_CHANNELS_24 9
+/* 5.2 GHz FAT primary channels: 36,44,52,60,100,108,116,124,132,149,157 */
+#define IWM_EEPROM_FAT_CHANNELS_52 11
+
+#define IWM_EEPROM_FAT_CHANNEL_ENABLED (1 << 0)
+
 enum {
 	IWM_EEPROM_CALIB_CAL_HDR,
 	IWM_EEPROM_CALIB_TX_POWER,
@@ -110,5 +121,7 @@ struct iwm_eeprom_entry {
 int iwm_eeprom_init(struct iwm_priv *iwm);
 void iwm_eeprom_exit(struct iwm_priv *iwm);
 u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id);
+int iwm_eeprom_fat_channels(struct iwm_priv *iwm);
+u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm);
 
 #endif
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 6b0bcad758ca..49067092d336 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -217,6 +217,13 @@ static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
 		 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
 		 IWM_BUILD_DAY(build_date));
 
+	if (!strcmp(img_name, iwm->bus_ops->umac_name))
+		sprintf(iwm->umac_version, "%02X.%02X",
+			ver->major, ver->minor);
+
+	if (!strcmp(img_name, iwm->bus_ops->lmac_name))
+		sprintf(iwm->lmac_version, "%02X.%02X",
+			ver->major, ver->minor);
 
  err_release_fw:
 	release_firmware(fw);
@@ -398,6 +405,8 @@ int iwm_load_fw(struct iwm_priv *iwm)
 	iwm_send_prio_table(iwm);
 	iwm_send_calib_results(iwm);
 	iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
+	iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
+			     iwm->conf.ct_kill_exit);
 
 	return 0;
 
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 1b02a4e2a1ac..5a26bb05a33a 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -65,6 +65,8 @@ struct iwm_conf {
 	u32 sdio_ior_timeout;
 	unsigned long calib_map;
 	unsigned long expected_calib_map;
+	u8 ct_kill_entry;
+	u8 ct_kill_exit;
 	bool reset_on_fatal_err;
 	bool auto_connect;
 	bool wimax_not_present;
@@ -79,7 +81,6 @@ struct iwm_conf {
 	u32 assoc_timeout;
 	u32 roam_timeout;
 	u32 wireless_mode;
-	u32 coexist_mode;
 
 	u8 ibss_band;
 	u8 ibss_channel;
@@ -129,11 +130,18 @@ struct iwm_notif {
 	unsigned long buf_size;
 };
 
+struct iwm_tid_info {
+	__le16 last_seq_num;
+	bool stopped;
+	struct mutex mutex;
+};
+
 struct iwm_sta_info {
 	u8 addr[ETH_ALEN];
 	bool valid;
 	bool qos;
 	u8 color;
+	struct iwm_tid_info tid_info[IWM_UMAC_TID_NR];
 };
 
 struct iwm_tx_info {
@@ -183,6 +191,8 @@ struct iwm_key {
 struct iwm_tx_queue {
 	int id;
 	struct sk_buff_head queue;
+	struct sk_buff_head stopped_queue;
+	spinlock_t lock;
 	struct workqueue_struct *wq;
 	struct work_struct worker;
 	u8 concat_buf[IWM_HAL_CONCATENATE_BUF_SIZE];
@@ -276,12 +286,14 @@ struct iwm_priv {
 	struct iw_statistics wstats;
 	struct delayed_work stats_request;
 	struct delayed_work disconnect;
+	struct delayed_work ct_kill_delay;
 
 	struct iwm_debugfs dbg;
 
 	u8 *eeprom;
 	struct timer_list watchdog;
 	struct work_struct reset_worker;
+	struct work_struct auth_retry_worker;
 	struct mutex mutex;
 
 	u8 *req_ie;
@@ -290,6 +302,8 @@ struct iwm_priv {
 	int resp_ie_len;
 
 	struct iwm_fw_error_hdr *last_fw_err;
+	char umac_version[8];
+	char lmac_version[8];
 
 	char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
 };
@@ -335,6 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
 int iwm_down(struct iwm_priv *iwm);
 
 /* TX API */
+int iwm_tid_to_queue(u16 tid);
 void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
 void iwm_tx_worker(struct work_struct *work);
 int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index 6c1a14c4480f..a3a79b5e2898 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -187,6 +187,14 @@ struct iwm_coex_prio_table_cmd {
 				     COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
 				     COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
 
+/* CT kill config command */
+struct iwm_ct_kill_cfg_cmd {
+	u32 exit_threshold;
+	u32 reserved;
+	u32 entry_threshold;
+} __attribute__ ((packed));
+
+
 /* LMAC OP CODES */
 #define REPLY_PAD			0x0
 #define REPLY_ALIVE			0x1
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 222eb2cf1b30..7f34d6dd3c41 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -64,9 +64,10 @@ static struct iwm_conf def_iwm_conf = {
 				  BIT(PHY_CALIBRATE_TX_IQ_CMD)	|
 				  BIT(PHY_CALIBRATE_RX_IQ_CMD)	|
 				  BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
+	.ct_kill_entry		= 110,
+	.ct_kill_exit		= 110,
 	.reset_on_fatal_err	= 1,
 	.auto_connect		= 1,
-	.wimax_not_present	= 0,
 	.enable_qos		= 1,
 	.mode			= UMAC_MODE_BSS,
 
@@ -78,8 +79,8 @@ static struct iwm_conf def_iwm_conf = {
 
 	.assoc_timeout		= 2,
 	.roam_timeout		= 10,
-	.wireless_mode		= WIRELESS_MODE_11A | WIRELESS_MODE_11G,
-	.coexist_mode		= COEX_MODE_CM,
+	.wireless_mode		= WIRELESS_MODE_11A | WIRELESS_MODE_11G |
+				  WIRELESS_MODE_11N,
 
 	/* IBSS */
 	.ibss_band		= UMAC_BAND_2GHZ,
@@ -92,6 +93,10 @@ static int modparam_reset;
 module_param_named(reset, modparam_reset, bool, 0644);
 MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
 
+static int modparam_wimax_enable = 1;
+module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
+MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
+
 int iwm_mode_to_nl80211_iftype(int mode)
 {
 	switch (mode) {
@@ -134,6 +139,17 @@ static void iwm_disconnect_work(struct work_struct *work)
 	cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
 }
 
+static void iwm_ct_kill_work(struct work_struct *work)
+{
+	struct iwm_priv *iwm =
+		container_of(work, struct iwm_priv, ct_kill_delay.work);
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
+
+	IWM_INFO(iwm, "CT kill delay timeout\n");
+
+	wiphy_rfkill_set_hw_state(wiphy, false);
+}
+
 static int __iwm_up(struct iwm_priv *iwm);
 static int __iwm_down(struct iwm_priv *iwm);
 
@@ -195,6 +211,33 @@ static void iwm_reset_worker(struct work_struct *work)
 	mutex_unlock(&iwm->mutex);
 }
 
+static void iwm_auth_retry_worker(struct work_struct *work)
+{
+	struct iwm_priv *iwm;
+	int i, ret;
+
+	iwm = container_of(work, struct iwm_priv, auth_retry_worker);
+	if (iwm->umac_profile_active) {
+		ret = iwm_invalidate_mlme_profile(iwm);
+		if (ret < 0)
+			return;
+	}
+
+	iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
+
+	ret = iwm_send_mlme_profile(iwm);
+	if (ret < 0)
+		return;
+
+	for (i = 0; i < IWM_NUM_KEYS; i++)
+		if (iwm->keys[i].key_len)
+			iwm_set_key(iwm, 0, &iwm->keys[i]);
+
+	iwm_set_tx_key(iwm, iwm->default_key);
+}
+
+
+
 static void iwm_watchdog(unsigned long data)
 {
 	struct iwm_priv *iwm = (struct iwm_priv *)data;
@@ -207,7 +250,7 @@ static void iwm_watchdog(unsigned long data)
 
 int iwm_priv_init(struct iwm_priv *iwm)
 {
-	int i;
+	int i, j;
 	char name[32];
 
 	iwm->status = 0;
@@ -226,7 +269,9 @@ int iwm_priv_init(struct iwm_priv *iwm)
 	iwm->scan_id = 1;
 	INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
 	INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
+	INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
 	INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
+	INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
 	INIT_LIST_HEAD(&iwm->bss_list);
 
 	skb_queue_head_init(&iwm->rx_list);
@@ -249,6 +294,8 @@ int iwm_priv_init(struct iwm_priv *iwm)
 			return -EAGAIN;
 
 		skb_queue_head_init(&iwm->txq[i].queue);
+		skb_queue_head_init(&iwm->txq[i].stopped_queue);
+		spin_lock_init(&iwm->txq[i].lock);
 	}
 
 	for (i = 0; i < IWM_NUM_KEYS; i++)
@@ -256,6 +303,12 @@ int iwm_priv_init(struct iwm_priv *iwm)
 
 	iwm->default_key = -1;
 
+	for (i = 0; i < IWM_STA_TABLE_NUM; i++)
+		for (j = 0; j < IWM_UMAC_TID_NR; j++) {
+			mutex_init(&iwm->sta_table[i].tid_info[j].mutex);
+			iwm->sta_table[i].tid_info[j].stopped = false;
+		}
+
 	init_timer(&iwm->watchdog);
 	iwm->watchdog.function = iwm_watchdog;
 	iwm->watchdog.data = (unsigned long)iwm;
@@ -436,7 +489,7 @@ static int iwm_config_boot_params(struct iwm_priv *iwm)
 	int ret;
 
 	/* check Wimax is off and config debug monitor */
-	if (iwm->conf.wimax_not_present) {
+	if (!modparam_wimax_enable) {
 		u32 data1 = 0x1f;
 		u32 addr1 = 0x606BE258;
 
@@ -529,6 +582,7 @@ void iwm_link_off(struct iwm_priv *iwm)
 
 	for (i = 0; i < IWM_TX_QUEUES; i++) {
 		skb_queue_purge(&iwm->txq[i].queue);
+		skb_queue_purge(&iwm->txq[i].stopped_queue);
 
 		iwm->txq[i].concat_count = 0;
 		iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf;
@@ -587,6 +641,8 @@ static int __iwm_up(struct iwm_priv *iwm)
 {
 	int ret;
 	struct iwm_notif *notif_reboot, *notif_ack = NULL;
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
+	u32 wireless_mode;
 
 	ret = iwm_bus_enable(iwm);
 	if (ret) {
@@ -638,6 +694,8 @@ static int __iwm_up(struct iwm_priv *iwm)
 		IWM_ERR(iwm, "MAC reading failed\n");
 		goto err_disable;
 	}
+	memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr,
+		ETH_ALEN);
 
 	/* We can load the FWs */
 	ret = iwm_load_fw(iwm);
@@ -646,6 +704,30 @@ static int __iwm_up(struct iwm_priv *iwm)
 		goto err_disable;
 	}
 
+	ret = iwm_eeprom_fat_channels(iwm);
+	if (ret) {
+		IWM_ERR(iwm, "Couldnt read HT channels EEPROM entries\n");
+		goto err_fw;
+	}
+
+	/*
+	 * Read our SKU capabilities.
+	 * If it's valid, we AND the configured wireless mode with the
+	 * device EEPROM value as the current profile wireless mode.
+	 */
+	wireless_mode = iwm_eeprom_wireless_mode(iwm);
+	if (wireless_mode) {
+		iwm->conf.wireless_mode &= wireless_mode;
+		if (iwm->umac_profile)
+			iwm->umac_profile->wireless_mode =
+					iwm->conf.wireless_mode;
+	} else
+		IWM_ERR(iwm, "Wrong SKU capabilities: 0x%x\n",
+			*((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP)));
+
+	snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
+		 iwm->lmac_version, iwm->umac_version);
+
 	/* We configure the UMAC and enable the wifi module */
 	ret = iwm_send_umac_config(iwm,
 			cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 35ec006c2d2c..e4f0f8705f65 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -76,6 +76,14 @@ static int iwm_stop(struct net_device *ndev)
  */
 static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
 
+int iwm_tid_to_queue(u16 tid)
+{
+	if (tid > IWM_UMAC_TID_NR - 2)
+		return -EINVAL;
+
+	return iwm_1d_to_queue[tid];
+}
+
 static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	skb->priority = cfg80211_classify8021d(skb);
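
iwm_tid_to_queue() is just a bounds check in front of the existing 802.1d table: iwm_1d_to_queue maps the eight data TIDs onto the four tx queues. A stand-alone sketch of that lookup, with a sentinel for invalid TIDs:

/* Illustrative sketch of the TID -> tx queue lookup used above. */
#include <stdio.h>

#define NUM_DATA_TIDS	8	/* data TIDs 0..7; TID 8 is the management TID */

static const int tid_to_queue_map[NUM_DATA_TIDS] = { 1, 0, 0, 1, 2, 2, 3, 3 };

static int tid_to_queue(int tid)
{
	if (tid < 0 || tid >= NUM_DATA_TIDS)
		return -1;	/* invalid TID */

	return tid_to_queue_map[tid];
}

int main(void)
{
	int tid;

	for (tid = 0; tid < NUM_DATA_TIDS; tid++)
		printf("TID %d -> queue %d\n", tid, tid_to_queue(tid));
	return 0;
}
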
@@ -152,6 +160,7 @@ void iwm_if_free(struct iwm_priv *iwm)
 	if (!iwm_to_ndev(iwm))
 		return;
 
+	cancel_delayed_work_sync(&iwm->ct_kill_delay);
 	free_netdev(iwm_to_ndev(iwm));
 	iwm_priv_deinit(iwm);
 	kfree(iwm->umac_profile);
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 771a301003c9..72c27a3e5528 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -423,7 +423,9 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
 			if (IS_ERR(ticket_node))
 				return PTR_ERR(ticket_node);
 
-			IWM_DBG_RX(iwm, DBG, "TICKET RELEASE(%d)\n",
+			IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
+				   ticket->action ==  IWM_RX_TICKET_RELEASE ?
+				   "RELEASE" : "DROP",
 				   ticket->id);
 			list_add_tail(&ticket_node->node, &iwm->rx_tickets);
 
@@ -500,6 +502,18 @@ static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
 	return 0;
 }
 
+static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
+{
+	if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
+	     iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
+	    (iwm->umac_profile->sec.ucast_cipher ==
+	     iwm->umac_profile->sec.mcast_cipher) &&
+	    (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
+	       return 1;
+
+       return 0;
+}
+
 static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
 				   unsigned long buf_size,
 				   struct iwm_wifi_cmd *cmd)
@@ -565,11 +579,17 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
 			goto ibss;
 
 		if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
-			cfg80211_connect_result(iwm_to_ndev(iwm),
-						complete->bssid,
-						NULL, 0, NULL, 0,
-						WLAN_STATUS_UNSPECIFIED_FAILURE,
-						GFP_KERNEL);
+			if (!iwm_is_open_wep_profile(iwm)) {
+				cfg80211_connect_result(iwm_to_ndev(iwm),
+					       complete->bssid,
+					       NULL, 0, NULL, 0,
+					       WLAN_STATUS_UNSPECIFIED_FAILURE,
+					       GFP_KERNEL);
+			} else {
+				/* Let's try shared WEP auth */
+				IWM_ERR(iwm, "Trying WEP shared auth\n");
+				schedule_work(&iwm->auth_retry_worker);
+			}
 		else
 			cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
 					      GFP_KERNEL);
@@ -713,6 +733,19 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
 	return 0;
 }
 
+static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
+				unsigned long buf_size,
+				struct iwm_wifi_cmd *cmd)
+{
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
+
+	IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
+
+	wiphy_rfkill_set_hw_state(wiphy, true);
+
+	return 0;
+}
+
 static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
 				     unsigned long buf_size,
 				     struct iwm_wifi_cmd *cmd)
@@ -899,6 +932,8 @@ static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
 	case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
 		IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
 		break;
+	case WIFI_IF_NTFY_RADIO_PREEMPTION:
+		return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
 	case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
 		return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
 	case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
@@ -1052,12 +1087,83 @@ static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
 	return 0;
 }
 
+static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
+				  unsigned long buf_size,
+				  struct iwm_wifi_cmd *cmd)
+{
+	struct iwm_umac_notif_stop_resume_tx *stp_res_tx =
+		(struct iwm_umac_notif_stop_resume_tx *)buf;
+	struct iwm_sta_info *sta_info;
+	struct iwm_tid_info *tid_info;
+	u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id);
+	u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk);
+	int bit, ret = 0;
+	bool stop = false;
+
+	IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n"
+		    "\tflags:       0x%x\n"
+		    "\tSTA id:      %d\n"
+		    "\tTID bitmask: 0x%x\n",
+		    stp_res_tx->flags, stp_res_tx->sta_id,
+		    stp_res_tx->stop_resume_tid_msk);
+
+	if (stp_res_tx->flags & UMAC_STOP_TX_FLAG)
+		stop = true;
+
+	sta_info = &iwm->sta_table[sta_id];
+	if (!sta_info->valid) {
+		IWM_ERR(iwm, "Stoping an invalid STA: %d %d\n",
+			sta_id, stp_res_tx->sta_id);
+		return -EINVAL;
+	}
+
+	for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
+		tid_info = &sta_info->tid_info[bit];
+
+		mutex_lock(&tid_info->mutex);
+		tid_info->stopped = stop;
+		mutex_unlock(&tid_info->mutex);
+
+		if (!stop) {
+			struct iwm_tx_queue *txq;
+			int queue = iwm_tid_to_queue(bit);
+
+			if (queue < 0)
+				continue;
+
+			txq = &iwm->txq[queue];
+			/*
+			 * If we resume, we have to move our SKBs
+			 * back to the tx queue and queue some work.
+			 */
+			spin_lock_bh(&txq->lock);
+			skb_queue_splice_init(&txq->stopped_queue, &txq->queue);
+			spin_unlock_bh(&txq->lock);
+
+			queue_work(txq->wq, &txq->worker);
+		}
+
+	}
+
+	/* We send an ACK only for the stop case */
+	if (stop)
+		ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx);
+
+	return ret;
+}
+
 static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
 				   unsigned long buf_size,
 				   struct iwm_wifi_cmd *cmd)
 {
-	struct iwm_umac_wifi_if *hdr =
-			(struct iwm_umac_wifi_if *)cmd->buf.payload;
+	struct iwm_umac_wifi_if *hdr;
+
+	if (cmd == NULL) {
+		IWM_ERR(iwm, "Couldn't find expected wifi command\n");
+		return -EINVAL;
+	}
+
+	hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
 
 	IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
 		    "oid is 0x%x\n", hdr->oid);
@@ -1079,6 +1185,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
 	return 0;
 }
 
+#define CT_KILL_DELAY (30 * HZ)
 static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
 			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
 {
@@ -1091,7 +1198,20 @@ static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
 		 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
 		 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
 
-	wiphy_rfkill_set_hw_state(wiphy, flags & IWM_CARD_STATE_HW_DISABLED);
+	if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
+		/*
+		 * We got a CTKILL event: We bring the interface down in
+		 * order to cool the device down, and try to bring it up
+		 * 30 seconds later. If it's still too hot, we'll go through
+		 * this code path again.
+		 */
+		cancel_delayed_work_sync(&iwm->ct_kill_delay);
+		schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
+	}
+
+	wiphy_rfkill_set_hw_state(wiphy, flags &
+				  (IWM_CARD_STATE_HW_DISABLED |
+				   IWM_CARD_STATE_CTKILL_DISABLED));
 
 	return 0;
 }
@@ -1282,6 +1402,14 @@ int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
 
 	switch (le32_to_cpu(hdr->cmd)) {
 	case UMAC_REBOOT_BARKER:
+		if (test_bit(IWM_STATUS_READY, &iwm->status)) {
+			IWM_ERR(iwm, "Unexpected BARKER\n");
+
+			schedule_work(&iwm->reset_worker);
+
+			return 0;
+		}
+
 		return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
 				      IWM_SRC_UDMA, buf, buf_size);
 	case UMAC_ACK_BARKER:
@@ -1308,6 +1436,7 @@ static const iwm_handler iwm_umac_handlers[] =
 	[UMAC_NOTIFY_OPCODE_STATS]		= iwm_ntf_statistics,
 	[UMAC_CMD_OPCODE_EEPROM_PROXY]		= iwm_ntf_eeprom_proxy,
 	[UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST]	= iwm_ntf_channel_info_list,
+	[UMAC_CMD_OPCODE_STOP_RESUME_STA_TX]	= iwm_ntf_stop_resume_tx,
 	[REPLY_RX_MPDU_CMD]			= iwm_ntf_rx_packet,
 	[UMAC_CMD_OPCODE_WIFI_IF_WRAPPER]	= iwm_ntf_wifi_if_wrapper,
 };
@@ -1444,7 +1573,8 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
 		}
 		break;
 	case IWM_RX_TICKET_DROP:
-		IWM_DBG_RX(iwm, DBG, "DROP packet\n");
+		IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
+			   le16_to_cpu(ticket_node->ticket->flags));
 		kfree_skb(packet->skb);
 		break;
 	default:
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 8b1de84003ca..a7ec7eac9137 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -224,8 +224,6 @@ static int if_sdio_disable(struct iwm_priv *iwm)
 	struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
 	int ret;
 
-	iwm_reset(iwm);
-
 	sdio_claim_host(hw->func);
 	sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
 	if (ret < 0)
@@ -237,6 +235,8 @@ static int if_sdio_disable(struct iwm_priv *iwm)
 
 	iwm_sdio_rx_free(hw);
 
+	iwm_reset(iwm);
+
 	IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
 
 	return 0;
@@ -399,6 +399,9 @@ static struct iwm_if_ops if_sdio_ops = {
 	.calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
 	.lmac_name = "iwmc3200wifi-lmac-sdio.bin",
 };
+MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin");
+MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin");
+MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin");
 
 static int iwm_sdio_probe(struct sdio_func *func,
 			  const struct sdio_device_id *id)
@@ -493,8 +496,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
 }
 
 static const struct sdio_device_id iwm_sdio_ids[] = {
-	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
-		      SDIO_DEVICE_ID_INTEL_IWMC3200WIFI) },
+	/* Global/AGN SKU */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
+	/* BGN SKU */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
 	{ /* end: all zeroes */	},
 };
 MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index e3b4f7902daf..55905f02309c 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -329,7 +329,7 @@ static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
 
 	memcpy(buf + sizeof(*hdr), skb->data, skb->len);
 
-	return 0;
+	return umac_cmd.seq_num;
 }
 
 static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
@@ -354,16 +354,15 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
 	return ret;
 }
 
-#define CONFIG_IWM_TX_CONCATENATED 1
-
 void iwm_tx_worker(struct work_struct *work)
 {
 	struct iwm_priv *iwm;
 	struct iwm_tx_info *tx_info = NULL;
 	struct sk_buff *skb;
-	int cmdlen, ret;
 	struct iwm_tx_queue *txq;
-	int pool_id;
+	struct iwm_sta_info *sta_info;
+	struct iwm_tid_info *tid_info;
+	int cmdlen, ret, pool_id;
 
 	txq = container_of(work, struct iwm_tx_queue, worker);
 	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
@@ -373,19 +372,46 @@ void iwm_tx_worker(struct work_struct *work)
 	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
 	       !skb_queue_empty(&txq->queue)) {
 
+		spin_lock_bh(&txq->lock);
 		skb = skb_dequeue(&txq->queue);
+		spin_unlock_bh(&txq->lock);
+
 		tx_info = skb_to_tx_info(skb);
+		sta_info = &iwm->sta_table[tx_info->sta];
+		if (!sta_info->valid) {
+			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
+			kfree_skb(skb);
+			continue;
+		}
+
+		tid_info = &sta_info->tid_info[tx_info->tid];
+
+		mutex_lock(&tid_info->mutex);
+
+		/*
+		 * If the RAxTID is stopped, we queue the skb to the stopped
+		 * queue.
+		 * Whenever we'll get a UMAC notification to resume the tx flow
+		 * for this RAxTID, we'll merge back the stopped queue into the
+		 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
+		 */
+		if (tid_info->stopped) {
+			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
+				   tx_info->sta, tx_info->tid);
+			spin_lock_bh(&txq->lock);
+			skb_queue_tail(&txq->stopped_queue, skb);
+			spin_unlock_bh(&txq->lock);
+
+			mutex_unlock(&tid_info->mutex);
+			continue;
+		}
+
 		cmdlen = IWM_UDMA_HDR_LEN + skb->len;
 
 		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
 			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
 			   tx_info->color);
 
-#if !CONFIG_IWM_TX_CONCATENATED
-		/* temporarily keep this to comparing the performance */
-		ret = iwm_send_packet(iwm, skb, pool_id);
-#else
-
 		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
 			iwm_tx_send_concat_packets(iwm, txq);
 
@@ -393,14 +419,21 @@ void iwm_tx_worker(struct work_struct *work)
 		if (ret) {
 			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
 				   "%d, Tx worker stopped\n", txq->id);
+			spin_lock_bh(&txq->lock);
 			skb_queue_head(&txq->queue, skb);
+			spin_unlock_bh(&txq->lock);
+
+			mutex_unlock(&tid_info->mutex);
 			break;
 		}
 
 		txq->concat_ptr = txq->concat_buf + txq->concat_count;
-		iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
+		tid_info->last_seq_num =
+			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
 		txq->concat_count += ALIGN(cmdlen, 16);
-#endif
+
+		mutex_unlock(&tid_info->mutex);
+
 		kfree_skb(skb);
 	}
 
@@ -419,14 +452,14 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct iwm_priv *iwm = ndev_to_iwm(netdev);
 	struct net_device *ndev = iwm_to_ndev(iwm);
 	struct wireless_dev *wdev = iwm_to_wdev(iwm);
-	u8 *dst_addr;
 	struct iwm_tx_info *tx_info;
 	struct iwm_tx_queue *txq;
 	struct iwm_sta_info *sta_info;
-	u8 sta_id;
+	u8 *dst_addr, sta_id;
 	u16 queue;
 	int ret;
 
+
 	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
 		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
 			   "not associated\n");
@@ -440,7 +473,8 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	txq = &iwm->txq[queue];
 
 	/* No free space for Tx, tx_worker is too slow */
-	if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
+	if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
+	    (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
 		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
 		netif_stop_subqueue(netdev, queue);
 		return NETDEV_TX_BUSY;
@@ -477,7 +511,9 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else
 		tx_info->tid = IWM_UMAC_MGMT_TID;
 
+	spin_lock_bh(&iwm->txq[queue].lock);
 	skb_queue_tail(&iwm->txq[queue].queue, skb);
+	spin_unlock_bh(&iwm->txq[queue].lock);
 
 	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
 
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index c5a14ae3160a..7f54a145ca65 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -83,6 +83,20 @@ struct iwm_udma_out_wifi_hdr {
 	((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
 	(UMAC_HDI_ACT_TBL_IDX_TID_LMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
 
+/* STA ID and color */
+#define STA_ID_SEED                        (0x0f)
+#define STA_ID_POS                         (0)
+#define STA_ID_MSK                         (STA_ID_SEED << STA_ID_POS)
+
+#define STA_COLOR_SEED                     (0x7)
+#define STA_COLOR_POS                      (4)
+#define STA_COLOR_MSK                      (STA_COLOR_SEED << STA_COLOR_POS)
+
+#define STA_ID_N_COLOR_COLOR(id_n_color) \
+	(((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
+#define STA_ID_N_COLOR_ID(id_n_color) \
+	(((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
+
 /* iwm_umac_notif_alive.page_grp_state Group number -- bits [3:0] */
 #define UMAC_ALIVE_PAGE_STS_GRP_NUM_POS		0
 #define UMAC_ALIVE_PAGE_STS_GRP_NUM_SEED	0xF
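
The STA_ID_N_COLOR_* macros added above unpack a packed id_n_color byte: the station index sits in bits [3:0] and the color in bits [6:4]. A stand-alone sketch of the same masking, with a made-up sample value:

/* Illustrative unpacking of a packed station id/color byte. */
#include <stdio.h>

#define STA_ID_MSK	0x0f	/* bits [3:0] */
#define STA_COLOR_POS	4
#define STA_COLOR_MSK	0x70	/* bits [6:4] */

int main(void)
{
	unsigned int id_n_color = 0x35;	/* sample: color 3, station id 5 */

	printf("sta id = %u, color = %u\n",
	       id_n_color & STA_ID_MSK,
	       (id_n_color & STA_COLOR_MSK) >> STA_COLOR_POS);
	return 0;
}
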
@@ -260,6 +274,9 @@ struct iwm_udma_out_wifi_hdr {
 #define UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST	0x16
 #define UMAC_CMD_OPCODE_SET_PARAM_LIST		0x17
 #define UMAC_CMD_OPCODE_GET_PARAM_LIST		0x18
+#define UMAC_CMD_OPCODE_STOP_RESUME_STA_TX      0x19
+#define UMAC_CMD_OPCODE_TEST_BLOCK_ACK          0x1A
+
 #define UMAC_CMD_OPCODE_BASE_WRAPPER            0xFA
 #define UMAC_CMD_OPCODE_LMAC_WRAPPER            0xFB
 #define UMAC_CMD_OPCODE_HW_TEST_WRAPPER         0xFC
@@ -281,6 +298,7 @@ struct iwm_udma_out_wifi_hdr {
 #define UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID                0x1B
 #define UMAC_WIFI_IF_CMD_SET_HOST_EXTENDED_IE            0x1C
 #define UMAC_WIFI_IF_CMD_GET_SUPPORTED_CHANNELS          0x1E
+#define UMAC_WIFI_IF_CMD_PMKID_UPDATE                    0x1F
 #define UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER                  0x20
 
 /* UMAC WiFi interface ports */
@@ -687,19 +705,24 @@ struct iwm_umac_notif_rx_ticket {
 /* Tx/Rx rates window (number of max of last update window per second) */
 #define UMAC_NTF_RATE_SAMPLE_NR	4
 
+/* Max number of bits required to go through all antennae in bitmasks */
+#define UMAC_PHY_NUM_CHAINS     3
+
 #define IWM_UMAC_MGMT_TID	8
-#define IWM_UMAC_TID_NR		8
+#define IWM_UMAC_TID_NR		9 /* 8 TIDs + MGMT */
 
 struct iwm_umac_notif_stats {
 	struct iwm_umac_wifi_in_hdr hdr;
 	__le32 flags;
 	__le32 timestamp;
-	__le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */
+	__le16 tid_load[IWM_UMAC_TID_NR + 1]; /* 1 non-QoS + 1 dword align */
 	__le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
 	__le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
+	__le32 chain_energy[UMAC_PHY_NUM_CHAINS];
 	s32 rssi_dbm;
 	s32 noise_dbm;
 	__le32 supp_rates;
+	__le32 supp_ht_rates;
 	__le32 missed_beacons;
 	__le32 rx_beacons;
 	__le32 rx_dir_pkts;
@@ -737,6 +760,20 @@ struct iwm_umac_notif_stats {
 	__le32 roam_ap_loadblance;
 } __attribute__ ((packed));
 
+#define UMAC_STOP_TX_FLAG    0x1
+#define UMAC_RESUME_TX_FLAG  0x2
+
+#define LAST_SEQ_NUM_INVALID     0xFFFF
+
+struct iwm_umac_notif_stop_resume_tx {
+	struct iwm_umac_wifi_in_hdr hdr;
+	u8 flags; /* UMAC_*_TX_FLAG_* */
+	u8 sta_id;
+	__le16 stop_resume_tid_msk; /* tid bitmask */
+} __attribute__ ((packed));
+
+#define UMAC_MAX_NUM_PMKIDS 4
+
 /* WiFi interface wrapper header */
 struct iwm_umac_wifi_if {
 	u8 oid;
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
deleted file mode 100644
index 5c6968101f0d..000000000000
--- a/drivers/net/wireless/libertas/11d.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/**
-  * This file contains functions for 802.11D.
-  */
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/wireless.h>
-
-#include "host.h"
-#include "decl.h"
-#include "11d.h"
-#include "dev.h"
-#include "wext.h"
-
-#define TX_PWR_DEFAULT	10
-
-static struct region_code_mapping region_code_mapping[] = {
-	{"US ", 0x10},		/* US FCC      */
-	{"CA ", 0x10},		/* IC Canada   */
-	{"SG ", 0x10},		/* Singapore   */
-	{"EU ", 0x30},		/* ETSI        */
-	{"AU ", 0x30},		/* Australia   */
-	{"KR ", 0x30},		/* Republic Of Korea */
-	{"ES ", 0x31},		/* Spain       */
-	{"FR ", 0x32},		/* France      */
-	{"JP ", 0x40},		/* Japan       */
-};
-
-/* Following 2 structure defines the supported channels */
-static struct chan_freq_power channel_freq_power_UN_BG[] = {
-	{1, 2412, TX_PWR_DEFAULT},
-	{2, 2417, TX_PWR_DEFAULT},
-	{3, 2422, TX_PWR_DEFAULT},
-	{4, 2427, TX_PWR_DEFAULT},
-	{5, 2432, TX_PWR_DEFAULT},
-	{6, 2437, TX_PWR_DEFAULT},
-	{7, 2442, TX_PWR_DEFAULT},
-	{8, 2447, TX_PWR_DEFAULT},
-	{9, 2452, TX_PWR_DEFAULT},
-	{10, 2457, TX_PWR_DEFAULT},
-	{11, 2462, TX_PWR_DEFAULT},
-	{12, 2467, TX_PWR_DEFAULT},
-	{13, 2472, TX_PWR_DEFAULT},
-	{14, 2484, TX_PWR_DEFAULT}
-};
-
-static u8 lbs_region_2_code(u8 *region)
-{
-	u8 i;
-
-	for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
-		region[i] = toupper(region[i]);
-
-	for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
-		if (!memcmp(region, region_code_mapping[i].region,
-			    COUNTRY_CODE_LEN))
-			return (region_code_mapping[i].code);
-	}
-
-	/* default is US */
-	return (region_code_mapping[0].code);
-}
-
-static u8 *lbs_code_2_region(u8 code)
-{
-	u8 i;
-
-	for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
-		if (region_code_mapping[i].code == code)
-			return (region_code_mapping[i].region);
-	}
-	/* default is US */
-	return (region_code_mapping[0].region);
-}
-
-/**
- *  @brief This function finds the nrchan-th chan after the firstchan
- *  @param band       band
- *  @param firstchan  first channel number
- *  @param nrchan   number of channels
- *  @return 	      the nrchan-th chan number
-*/
-static u8 lbs_get_chan_11d(u8 firstchan, u8 nrchan, u8 *chan)
-/*find the nrchan-th chan after the firstchan*/
-{
-	u8 i;
-	struct chan_freq_power *cfp;
-	u8 cfp_no;
-
-	cfp = channel_freq_power_UN_BG;
-	cfp_no = ARRAY_SIZE(channel_freq_power_UN_BG);
-
-	for (i = 0; i < cfp_no; i++) {
-		if ((cfp + i)->channel == firstchan) {
-			lbs_deb_11d("firstchan found\n");
-			break;
-		}
-	}
-
-	if (i < cfp_no) {
-		/*if beyond the boundary */
-		if (i + nrchan < cfp_no) {
-			*chan = (cfp + i + nrchan)->channel;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-/**
- *  @brief This function Checks if chan txpwr is learned from AP/IBSS
- *  @param chan                 chan number
- *  @param parsed_region_chan   pointer to parsed_region_chan_11d
- *  @return 	                TRUE; FALSE
-*/
-static u8 lbs_channel_known_11d(u8 chan,
-			  struct parsed_region_chan_11d * parsed_region_chan)
-{
-	struct chan_power_11d *chanpwr = parsed_region_chan->chanpwr;
-	u8 nr_chan = parsed_region_chan->nr_chan;
-	u8 i = 0;
-
-	lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr,
-		sizeof(struct chan_power_11d) * nr_chan);
-
-	for (i = 0; i < nr_chan; i++) {
-		if (chan == chanpwr[i].chan) {
-			lbs_deb_11d("found chan %d\n", chan);
-			return 1;
-		}
-	}
-
-	lbs_deb_11d("chan %d not found\n", chan);
-	return 0;
-}
-
-u32 lbs_chan_2_freq(u8 chan)
-{
-	struct chan_freq_power *cf;
-	u16 i;
-	u32 freq = 0;
-
-	cf = channel_freq_power_UN_BG;
-
-	for (i = 0; i < ARRAY_SIZE(channel_freq_power_UN_BG); i++) {
-		if (chan == cf[i].channel)
-			freq = cf[i].freq;
-	}
-
-	return freq;
-}
-
-static int generate_domain_info_11d(struct parsed_region_chan_11d
-				  *parsed_region_chan,
-				  struct lbs_802_11d_domain_reg *domaininfo)
-{
-	u8 nr_subband = 0;
-
-	u8 nr_chan = parsed_region_chan->nr_chan;
-	u8 nr_parsedchan = 0;
-
-	u8 firstchan = 0, nextchan = 0, maxpwr = 0;
-
-	u8 i, flag = 0;
-
-	memcpy(domaininfo->countrycode, parsed_region_chan->countrycode,
-	       COUNTRY_CODE_LEN);
-
-	lbs_deb_11d("nrchan %d\n", nr_chan);
-	lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan,
-		sizeof(struct parsed_region_chan_11d));
-
-	for (i = 0; i < nr_chan; i++) {
-		if (!flag) {
-			flag = 1;
-			nextchan = firstchan =
-			    parsed_region_chan->chanpwr[i].chan;
-			maxpwr = parsed_region_chan->chanpwr[i].pwr;
-			nr_parsedchan = 1;
-			continue;
-		}
-
-		if (parsed_region_chan->chanpwr[i].chan == nextchan + 1 &&
-		    parsed_region_chan->chanpwr[i].pwr == maxpwr) {
-			nextchan++;
-			nr_parsedchan++;
-		} else {
-			domaininfo->subband[nr_subband].firstchan = firstchan;
-			domaininfo->subband[nr_subband].nrchan =
-			    nr_parsedchan;
-			domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
-			nr_subband++;
-			nextchan = firstchan =
-			    parsed_region_chan->chanpwr[i].chan;
-			maxpwr = parsed_region_chan->chanpwr[i].pwr;
-		}
-	}
-
-	if (flag) {
-		domaininfo->subband[nr_subband].firstchan = firstchan;
-		domaininfo->subband[nr_subband].nrchan = nr_parsedchan;
-		domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
-		nr_subband++;
-	}
-	domaininfo->nr_subband = nr_subband;
-
-	lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
-	lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
-		COUNTRY_CODE_LEN + 1 +
-		sizeof(struct ieee_subbandset) * nr_subband);
-	return 0;
-}
-
-/**
- *  @brief This function generates parsed_region_chan from Domain Info learned from AP/IBSS
- *  @param region_chan          pointer to struct region_channel
- *  @param *parsed_region_chan  pointer to parsed_region_chan_11d
- *  @return 	                N/A
-*/
-static void lbs_generate_parsed_region_chan_11d(struct region_channel *region_chan,
-					  struct parsed_region_chan_11d *
-					  parsed_region_chan)
-{
-	u8 i;
-	struct chan_freq_power *cfp;
-
-	if (region_chan == NULL) {
-		lbs_deb_11d("region_chan is NULL\n");
-		return;
-	}
-
-	cfp = region_chan->CFP;
-	if (cfp == NULL) {
-		lbs_deb_11d("cfp is NULL \n");
-		return;
-	}
-
-	parsed_region_chan->band = region_chan->band;
-	parsed_region_chan->region = region_chan->region;
-	memcpy(parsed_region_chan->countrycode,
-	       lbs_code_2_region(region_chan->region), COUNTRY_CODE_LEN);
-
-	lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region,
-	       parsed_region_chan->band);
-
-	for (i = 0; i < region_chan->nrcfp; i++, cfp++) {
-		parsed_region_chan->chanpwr[i].chan = cfp->channel;
-		parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower;
-		lbs_deb_11d("chan %d, pwr %d\n",
-		       parsed_region_chan->chanpwr[i].chan,
-		       parsed_region_chan->chanpwr[i].pwr);
-	}
-	parsed_region_chan->nr_chan = region_chan->nrcfp;
-
-	lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan);
-
-	return;
-}
-
-/**
- *  @brief generate parsed_region_chan from Domain Info learned from AP/IBSS
- *  @param region               region ID
- *  @param band                 band
- *  @param chan                 chan
- *  @return 	                TRUE;FALSE
-*/
-static u8 lbs_region_chan_supported_11d(u8 region, u8 chan)
-{
-	struct chan_freq_power *cfp;
-	int cfp_no;
-	u8 idx;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	cfp = lbs_get_region_cfp_table(region, &cfp_no);
-	if (cfp == NULL)
-		return 0;
-
-	for (idx = 0; idx < cfp_no; idx++) {
-		if (chan == (cfp + idx)->channel) {
-			/* If Mrvl Chip Supported? */
-			if ((cfp + idx)->unsupported) {
-				ret = 0;
-			} else {
-				ret = 1;
-			}
-			goto done;
-		}
-	}
-
-	/*chan is not in the region table */
-
-done:
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function checks if chan txpwr is learned from AP/IBSS
- *  @param chan                 chan number
- *  @param parsed_region_chan   pointer to parsed_region_chan_11d
- *  @return 	                0
-*/
-static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
-				 u8 band,
-				 struct parsed_region_chan_11d *parsed_region_chan)
-{
-	u8 nr_subband, nrchan;
-	u8 lastchan, firstchan;
-	u8 region;
-	u8 curchan = 0;
-
-	u8 idx = 0;		/*chan index in parsed_region_chan */
-
-	u8 j, i;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	/*validation Rules:
-	   1. valid region Code
-	   2. First Chan increment
-	   3. channel range no overlap
-	   4. channel is valid?
-	   5. channel is supported by region?
-	   6. Others
-	 */
-
-	lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
-
-	if ((*(countryinfo->countrycode)) == 0
-	    || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
-		/* No region Info or Wrong region info: treat as No 11D info */
-		goto done;
-	}
-
-	/*Step1: check region_code */
-	parsed_region_chan->region = region =
-	    lbs_region_2_code(countryinfo->countrycode);
-
-	lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region);
-	lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode,
-		COUNTRY_CODE_LEN);
-
-	parsed_region_chan->band = band;
-
-	memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
-	       COUNTRY_CODE_LEN);
-
-	nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
-	    sizeof(struct ieee_subbandset);
-
-	for (j = 0, lastchan = 0; j < nr_subband; j++) {
-
-		if (countryinfo->subband[j].firstchan <= lastchan) {
-			/*Step2&3. Check First Chan Num increment and no overlap */
-			lbs_deb_11d("chan %d>%d, overlap\n",
-			       countryinfo->subband[j].firstchan, lastchan);
-			continue;
-		}
-
-		firstchan = countryinfo->subband[j].firstchan;
-		nrchan = countryinfo->subband[j].nrchan;
-
-		for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) {
-			/*step4: channel is supported? */
-
-			if (!lbs_get_chan_11d(firstchan, i, &curchan)) {
-				/* Chan is not found in UN table */
-				lbs_deb_11d("chan is not supported: %d \n", i);
-				break;
-			}
-
-			lastchan = curchan;
-
-			if (lbs_region_chan_supported_11d(region, curchan)) {
-				/*step5: Check if curchan is supported by mrvl in region */
-				parsed_region_chan->chanpwr[idx].chan = curchan;
-				parsed_region_chan->chanpwr[idx].pwr =
-				    countryinfo->subband[j].maxtxpwr;
-				idx++;
-			} else {
-				/*not supported and ignore the chan */
-				lbs_deb_11d(
-				       "i %d, chan %d unsupported in region %x, band %d\n",
-				       i, curchan, region, band);
-			}
-		}
-
-		/*Step6: Add other checking if any */
-
-	}
-
-	parsed_region_chan->nr_chan = idx;
-
-	lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan);
-	lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan,
-		2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx);
-
-done:
-	lbs_deb_enter(LBS_DEB_11D);
-	return 0;
-}
-
-/**
- *  @brief This function calculates the scan type for channels
- *  @param chan                 chan number
- *  @param parsed_region_chan   pointer to parsed_region_chan_11d
- *  @return 	                PASSIVE if chan is unknown; ACTIVE if chan is known
-*/
-u8 lbs_get_scan_type_11d(u8 chan,
-			  struct parsed_region_chan_11d * parsed_region_chan)
-{
-	u8 scan_type = CMD_SCAN_TYPE_PASSIVE;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	if (lbs_channel_known_11d(chan, parsed_region_chan)) {
-		lbs_deb_11d("found, do active scan\n");
-		scan_type = CMD_SCAN_TYPE_ACTIVE;
-	} else {
-		lbs_deb_11d("not found, do passive scan\n");
-	}
-
-	lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type);
-	return scan_type;
-
-}
-
-void lbs_init_11d(struct lbs_private *priv)
-{
-	priv->enable11d = 0;
-	memset(&(priv->parsed_region_chan), 0,
-	       sizeof(struct parsed_region_chan_11d));
-	return;
-}
-
-/**
- *  @brief This function sets DOMAIN INFO to FW
- *  @param priv       pointer to struct lbs_private
- *  @return 	      0; -1
-*/
-static int set_domain_info_11d(struct lbs_private *priv)
-{
-	int ret;
-
-	if (!priv->enable11d) {
-		lbs_deb_11d("dnld domain Info with 11d disabled\n");
-		return 0;
-	}
-
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
-				    CMD_ACT_SET,
-				    CMD_OPTION_WAITFORRSP, 0, NULL);
-	if (ret)
-		lbs_deb_11d("fail to dnld domain info\n");
-
-	return ret;
-}
-
-/**
- *  @brief This function setups scan channels
- *  @param priv       pointer to struct lbs_private
- *  @param band       band
- *  @return 	      0
-*/
-int lbs_set_universaltable(struct lbs_private *priv, u8 band)
-{
-	u16 size = sizeof(struct chan_freq_power);
-	u16 i = 0;
-
-	memset(priv->universal_channel, 0,
-	       sizeof(priv->universal_channel));
-
-	priv->universal_channel[i].nrcfp =
-	    sizeof(channel_freq_power_UN_BG) / size;
-	lbs_deb_11d("BG-band nrcfp %d\n",
-	       priv->universal_channel[i].nrcfp);
-
-	priv->universal_channel[i].CFP = channel_freq_power_UN_BG;
-	priv->universal_channel[i].valid = 1;
-	priv->universal_channel[i].region = UNIVERSAL_REGION_CODE;
-	priv->universal_channel[i].band = band;
-	i++;
-
-	return 0;
-}
-
-/**
- *  @brief This function implements command CMD_802_11D_DOMAIN_INFO
- *  @param priv       pointer to struct lbs_private
- *  @param cmd        pointer to cmd buffer
- *  @param cmdno      cmd ID
- *  @param cmdOption  cmd action
- *  @return 	      0
-*/
-int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
-				 struct cmd_ds_command *cmd, u16 cmdno,
-				 u16 cmdoption)
-{
-	struct cmd_ds_802_11d_domain_info *pdomaininfo =
-	    &cmd->params.domaininfo;
-	struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
-	u8 nr_subband = priv->domainreg.nr_subband;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	lbs_deb_11d("nr_subband=%x\n", nr_subband);
-
-	cmd->command = cpu_to_le16(cmdno);
-	pdomaininfo->action = cpu_to_le16(cmdoption);
-	if (cmdoption == CMD_ACT_GET) {
-		cmd->size =
-		    cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
-		lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
-			le16_to_cpu(cmd->size));
-		goto done;
-	}
-
-	domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
-	memcpy(domain->countrycode, priv->domainreg.countrycode,
-	       sizeof(domain->countrycode));
-
-	domain->header.len =
-	    cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
-			     sizeof(domain->countrycode));
-
-	if (nr_subband) {
-		memcpy(domain->subband, priv->domainreg.subband,
-		       nr_subband * sizeof(struct ieee_subbandset));
-
-		cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
-					     le16_to_cpu(domain->header.len) +
-					     sizeof(struct mrvl_ie_header) +
-					     S_DS_GEN);
-	} else {
-		cmd->size =
-		    cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
-	}
-
-	lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size));
-
-done:
-	lbs_deb_enter(LBS_DEB_11D);
-	return 0;
-}
-
-/**
- *  @brief This function parses countryinfo from AP and download country info to FW
- *  @param priv    pointer to struct lbs_private
- *  @param resp    pointer to command response buffer
- *  @return 	   0; -1
- */
-int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
-	struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
-	u16 action = le16_to_cpu(domaininfo->action);
-	s16 ret = 0;
-	u8 nr_subband = 0;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
-		(int)le16_to_cpu(resp->size));
-
-	nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
-		      sizeof(struct ieee_subbandset);
-
-	lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
-
-	if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) {
-		lbs_deb_11d("Invalid Numrer of Subband returned!!\n");
-		return -1;
-	}
-
-	switch (action) {
-	case CMD_ACT_SET:	/*Proc Set action */
-		break;
-
-	case CMD_ACT_GET:
-		break;
-	default:
-		lbs_deb_11d("Invalid action:%d\n", domaininfo->action);
-		ret = -1;
-		break;
-	}
-
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function parses countryinfo from AP and download country info to FW
- *  @param priv    pointer to struct lbs_private
- *  @return 	   0; -1
- */
-int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
-                                        struct bss_descriptor * bss)
-{
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_11D);
-	if (priv->enable11d) {
-		memset(&priv->parsed_region_chan, 0,
-		       sizeof(struct parsed_region_chan_11d));
-		ret = parse_domain_info_11d(&bss->countryinfo, 0,
-					       &priv->parsed_region_chan);
-
-		if (ret == -1) {
-			lbs_deb_11d("error parsing domain_info from AP\n");
-			goto done;
-		}
-
-		memset(&priv->domainreg, 0,
-		       sizeof(struct lbs_802_11d_domain_reg));
-		generate_domain_info_11d(&priv->parsed_region_chan,
-				      &priv->domainreg);
-
-		ret = set_domain_info_11d(priv);
-
-		if (ret) {
-			lbs_deb_11d("error setting domain info\n");
-			goto done;
-		}
-	}
-	ret = 0;
-
-done:
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function generates 11D info from user specified regioncode and download to FW
- *  @param priv    pointer to struct lbs_private
- *  @return 	   0; -1
- */
-int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv)
-{
-	int ret;
-	struct region_channel *region_chan;
-	u8 j;
-
-	lbs_deb_enter(LBS_DEB_11D);
-	lbs_deb_11d("curbssparams.band %d\n", priv->curbssparams.band);
-
-	if (priv->enable11d) {
-		/* update parsed_region_chan_11; dnld domaininf to FW */
-
-		for (j = 0; j < ARRAY_SIZE(priv->region_channel); j++) {
-			region_chan = &priv->region_channel[j];
-
-			lbs_deb_11d("%d region_chan->band %d\n", j,
-			       region_chan->band);
-
-			if (!region_chan || !region_chan->valid
-			    || !region_chan->CFP)
-				continue;
-			if (region_chan->band != priv->curbssparams.band)
-				continue;
-			break;
-		}
-
-		if (j >= ARRAY_SIZE(priv->region_channel)) {
-			lbs_deb_11d("region_chan not found, band %d\n",
-			       priv->curbssparams.band);
-			ret = -1;
-			goto done;
-		}
-
-		memset(&priv->parsed_region_chan, 0,
-		       sizeof(struct parsed_region_chan_11d));
-		lbs_generate_parsed_region_chan_11d(region_chan,
-						     &priv->
-						     parsed_region_chan);
-
-		memset(&priv->domainreg, 0,
-		       sizeof(struct lbs_802_11d_domain_reg));
-		generate_domain_info_11d(&priv->parsed_region_chan,
-					 &priv->domainreg);
-
-		ret = set_domain_info_11d(priv);
-
-		if (ret) {
-			lbs_deb_11d("error setting domain info\n");
-			goto done;
-		}
-
-	}
-	ret = 0;
-
-done:
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
deleted file mode 100644
index fb75d3e321a0..000000000000
--- a/drivers/net/wireless/libertas/11d.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
-  * This header file contains data structures and
-  * function declarations of 802.11d
-  */
-#ifndef _LBS_11D_
-#define _LBS_11D_
-
-#include "types.h"
-#include "defs.h"
-
-#define UNIVERSAL_REGION_CODE			0xff
-
-/** (Beaconsize(256)-5(IEId,len,contrystr(3))/3(FirstChan,NoOfChan,MaxPwr)
- */
-#define MRVDRV_MAX_SUBBAND_802_11D		83
-
-#define COUNTRY_CODE_LEN			3
-#define MAX_NO_OF_CHAN 				40
-
-struct cmd_ds_command;
-
-/** Data structure for Country IE*/
-struct ieee_subbandset {
-	u8 firstchan;
-	u8 nrchan;
-	u8 maxtxpwr;
-} __attribute__ ((packed));
-
-struct ieee_ie_country_info_set {
-	struct ieee_ie_header header;
-
-	u8 countrycode[COUNTRY_CODE_LEN];
-	struct ieee_subbandset subband[1];
-};
-
-struct ieee_ie_country_info_full_set {
-	struct ieee_ie_header header;
-
-	u8 countrycode[COUNTRY_CODE_LEN];
-	struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
-} __attribute__ ((packed));
-
-struct mrvl_ie_domain_param_set {
-	struct mrvl_ie_header header;
-
-	u8 countrycode[COUNTRY_CODE_LEN];
-	struct ieee_subbandset subband[1];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11d_domain_info {
-	__le16 action;
-	struct mrvl_ie_domain_param_set domain;
-} __attribute__ ((packed));
-
-/** domain regulatory information */
-struct lbs_802_11d_domain_reg {
-	/** country Code*/
-	u8 countrycode[COUNTRY_CODE_LEN];
-	/** No. of subband*/
-	u8 nr_subband;
-	struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
-};
-
-struct chan_power_11d {
-	u8 chan;
-	u8 pwr;
-} __attribute__ ((packed));
-
-struct parsed_region_chan_11d {
-	u8 band;
-	u8 region;
-	s8 countrycode[COUNTRY_CODE_LEN];
-	struct chan_power_11d chanpwr[MAX_NO_OF_CHAN];
-	u8 nr_chan;
-} __attribute__ ((packed));
-
-struct region_code_mapping {
-	u8 region[COUNTRY_CODE_LEN];
-	u8 code;
-};
-
-struct lbs_private;
-
-u8 lbs_get_scan_type_11d(u8 chan,
-			  struct parsed_region_chan_11d *parsed_region_chan);
-
-u32 lbs_chan_2_freq(u8 chan);
-
-void lbs_init_11d(struct lbs_private *priv);
-
-int lbs_set_universaltable(struct lbs_private *priv, u8 band);
-
-int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
-				 struct cmd_ds_command *cmd, u16 cmdno,
-				 u16 cmdOption);
-
-int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
-
-struct bss_descriptor;
-int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
-                                        struct bss_descriptor * bss);
-
-int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv);
-
-#endif
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
new file mode 100644
index 000000000000..30aa9d48d67e
--- /dev/null
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -0,0 +1,39 @@
+config LIBERTAS
+	tristate "Marvell 8xxx Libertas WLAN driver support"
+	depends on CFG80211
+	select WIRELESS_EXT
+	select WEXT_SPY
+	select LIB80211
+	select FW_LOADER
+	---help---
+	  A library for Marvell Libertas 8xxx devices.
+
+config LIBERTAS_USB
+	tristate "Marvell Libertas 8388 USB 802.11b/g cards"
+	depends on LIBERTAS && USB
+	---help---
+	  A driver for Marvell Libertas 8388 USB devices.
+
+config LIBERTAS_CS
+	tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
+	depends on LIBERTAS && PCMCIA
+	---help---
+	  A driver for Marvell Libertas 8385 CompactFlash devices.
+
+config LIBERTAS_SDIO
+	tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
+	depends on LIBERTAS && MMC
+	---help---
+	  A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
+
+config LIBERTAS_SPI
+	tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
+	depends on LIBERTAS && SPI
+	---help---
+	  A driver for Marvell Libertas 8686 SPI devices.
+
+config LIBERTAS_DEBUG
+	bool "Enable full debugging output in the Libertas module."
+	depends on LIBERTAS
+	---help---
+	  Debugging support.
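+
+# Example (illustrative): a modular build of the SDIO variant would set
+# CONFIG_LIBERTAS=m and CONFIG_LIBERTAS_SDIO=m, with CFG80211 and MMC
+# enabled to satisfy the dependencies above.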
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 0b6918584503..b188cd97a053 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,5 +1,15 @@
-libertas-objs := main.o wext.o rx.o tx.o cmd.o cmdresp.o scan.o 11d.o	\
-		 debugfs.o persistcfg.o ethtool.o assoc.o
+libertas-y += assoc.o
+libertas-y += cfg.o
+libertas-y += cmd.o
+libertas-y += cmdresp.o
+libertas-y += debugfs.o
+libertas-y += ethtool.o
+libertas-y += main.o
+libertas-y += mesh.o
+libertas-y += rx.o
+libertas-y += scan.o
+libertas-y += tx.o
+libertas-y += wext.o
 
 usb8xxx-objs += if_usb.o
 libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index ab6a2d518af0..2726c044430f 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -1,5 +1,5 @@
 ================================================================================
-			README for USB8388
+			README for Libertas
 
  (c) Copyright © 2003-2006, Marvell International Ltd.
  All Rights Reserved
@@ -226,4 +226,28 @@ setuserscan
     All entries in the scan table (not just the new scan data when keep=1)
     will be displayed upon completion by use of the getscantable ioctl.
 
+========================
+IWCONFIG COMMANDS
+========================
+power period
+
+	This command is used to configure the station in deep sleep mode /
+	auto deep sleep mode.
+
+	A timer is used to monitor host activity (commands, events, etc.).
+	When activity is detected, the station automatically exits deep
+	sleep mode and restarts the timer. When the timer expires (no
+	activity for the configured idle period), deep sleep mode is
+	entered again automatically.
+
+	Note: this command is supported on the SDIO interface only.
+
+	Usage:
+	To enable deep sleep mode do:
+		iwconfig wlan0 power period 0
+	To enable auto deep sleep mode with idle time period 5 seconds do:
+	To enable auto deep sleep mode with an idle period of 5 seconds do:
+	To disable deep sleep/auto deep sleep mode do:
+		iwconfig wlan0 power period -1
+
 ==============================================================================
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index dd8732611ba9..751067369ba8 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -23,6 +23,13 @@ static const u8 bssid_off[ETH_ALEN]  __attribute__ ((aligned (2))) =
  */
 #define CAPINFO_MASK	(~(0xda00))
 
+/**
+ * 802.11b/g supported bitrates (in 500Kb/s units)
+ */
+u8 lbs_bg_rates[MAX_RATES] =
+    { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
+0x00, 0x00 };
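+/*
+ * In 500 kb/s units these entries decode to 1, 2, 5.5, 11, 6, 9, 12, 18,
+ * 24, 36, 48 and 54 Mb/s; the two trailing zeros appear to be padding up
+ * to MAX_RATES.
+ */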
+
 
 /**
  *  @brief This function finds common rates between rates and card rates.
@@ -147,6 +154,397 @@ static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth
 }
 
 
+int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
+			   struct assoc_request *assoc)
+{
+	struct cmd_ds_802_11_set_wep cmd;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	cmd.action = cpu_to_le16(cmd_action);
+
+	if (cmd_action == CMD_ACT_ADD) {
+		int i;
+
+		/* default tx key index */
+		cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
+					   CMD_WEP_KEY_INDEX_MASK);
+
+		/* Copy key types and material to host command structure */
+		for (i = 0; i < 4; i++) {
+			struct enc_key *pkey = &assoc->wep_keys[i];
+
+			switch (pkey->len) {
+			case KEY_LEN_WEP_40:
+				cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
+				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
+				lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
+				break;
+			case KEY_LEN_WEP_104:
+				cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
+				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
+				lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
+				break;
+			case 0:
+				break;
+			default:
+				lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
+					    i, pkey->len);
+				ret = -1;
+				goto done;
+				break;
+			}
+		}
+	} else if (cmd_action == CMD_ACT_REMOVE) {
+		/* ACT_REMOVE clears _all_ WEP keys */
+
+		/* default tx key index */
+		cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
+					   CMD_WEP_KEY_INDEX_MASK);
+		lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
+done:
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
+			      uint16_t *enable)
+{
+	struct cmd_ds_802_11_enable_rsn cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(cmd_action);
+
+	if (cmd_action == CMD_ACT_GET)
+		cmd.enable = 0;
+	else {
+		if (*enable)
+			cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
+		else
+			cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
+		lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
+	if (!ret && cmd_action == CMD_ACT_GET)
+		*enable = le16_to_cpu(cmd.enable);
+
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
+		struct enc_key *key)
+{
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (key->flags & KEY_INFO_WPA_ENABLED)
+		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
+	if (key->flags & KEY_INFO_WPA_UNICAST)
+		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
+	if (key->flags & KEY_INFO_WPA_MCAST)
+		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
+
+	keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+	keyparam->keytypeid = cpu_to_le16(key->type);
+	keyparam->keylen = cpu_to_le16(key->len);
+	memcpy(keyparam->key, key->key, key->len);
+
+	/* Length field doesn't include the {type,length} header */
+	keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
+	lbs_deb_leave(LBS_DEB_CMD);
+}
+
+int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
+				struct assoc_request *assoc)
+{
+	struct cmd_ds_802_11_key_material cmd;
+	int ret = 0;
+	int index = 0;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	cmd.action = cpu_to_le16(cmd_action);
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	if (cmd_action == CMD_ACT_GET) {
+		cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
+	} else {
+		memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
+
+		if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
+			set_one_wpa_key(&cmd.keyParamSet[index],
+					&assoc->wpa_unicast_key);
+			index++;
+		}
+
+		if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
+			set_one_wpa_key(&cmd.keyParamSet[index],
+					&assoc->wpa_mcast_key);
+			index++;
+		}
+
+		/* The common header and as many keys as we included */
+		cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
+						    keyParamSet[index]));
+	}
+	ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
+	/* Copy the returned key to driver private data */
+	if (!ret && cmd_action == CMD_ACT_GET) {
+		void *buf_ptr = cmd.keyParamSet;
+		void *resp_end = &(&cmd)[1];
+
+		while (buf_ptr < resp_end) {
+			struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
+			struct enc_key *key;
+			uint16_t param_set_len = le16_to_cpu(keyparam->length);
+			uint16_t key_len = le16_to_cpu(keyparam->keylen);
+			uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
+			uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
+			void *end;
+
+			end = (void *)keyparam + sizeof(keyparam->type)
+				+ sizeof(keyparam->length) + param_set_len;
+
+			/* Make sure we don't access past the end of the IEs */
+			if (end > resp_end)
+				break;
+
+			if (key_flags & KEY_INFO_WPA_UNICAST)
+				key = &priv->wpa_unicast_key;
+			else if (key_flags & KEY_INFO_WPA_MCAST)
+				key = &priv->wpa_mcast_key;
+			else
+				break;
+
+			/* Copy returned key into driver */
+			memset(key, 0, sizeof(struct enc_key));
+			if (key_len > sizeof(key->key))
+				break;
+			key->type = key_type;
+			key->flags = key_flags;
+			key->len = key_len;
+			memcpy(key->key, keyparam->key, key->len);
+
+			buf_ptr = end + 1;
+		}
+	}
+
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
+{
+/*		Bit  	Rate
+*		15:13 Reserved
+*		12    54 Mbps
+*		11    48 Mbps
+*		10    36 Mbps
+*		9     24 Mbps
+*		8     18 Mbps
+*		7     12 Mbps
+*		6     9 Mbps
+*		5     6 Mbps
+*		4     Reserved
+*		3     11 Mbps
+*		2     5.5 Mbps
+*		1     2 Mbps
+*		0     1 Mbps
+**/
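+/*
+ * Worked example (sketch): assuming lbs_data_rate_to_fw_index() returns
+ * the bit position from the table above, a fixed rate of 11 Mbps (bit 3)
+ * gives 1 << 3 = 0x0008, while lower_rates_ok gives
+ * 0x1fef >> (12 - 3) = 0x000f, i.e. 1/2/5.5/11 Mbps.
+ */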
+
+	uint16_t ratemask;
+	int i = lbs_data_rate_to_fw_index(rate);
+	if (lower_rates_ok)
+		ratemask = (0x1fef >> (12 - i));
+	else
+		ratemask = (1 << i);
+	return cpu_to_le16(ratemask);
+}
+
+int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
+				      uint16_t cmd_action)
+{
+	struct cmd_ds_802_11_rate_adapt_rateset cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (!priv->cur_rate && !priv->enablehwauto)
+		return -EINVAL;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	cmd.action = cpu_to_le16(cmd_action);
+	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
+	cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
+	ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
+	if (!ret && cmd_action == CMD_ACT_GET) {
+		priv->ratebitmap = le16_to_cpu(cmd.bitmap);
+		priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
+	}
+
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+/**
+ *  @brief Set the data rate
+ *
+ *  @param priv    	A pointer to struct lbs_private structure
+ *  @param rate  	The desired data rate, or 0 to clear a locked rate
+ *
+ *  @return 	   	0 on success, error on failure
+ */
+int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
+{
+	struct cmd_ds_802_11_data_rate cmd;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	if (rate > 0) {
+		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
+		cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
+		if (cmd.rates[0] == 0) {
+			lbs_deb_cmd("DATA_RATE: invalid requested rate of"
+				" 0x%02X\n", rate);
+			ret = 0;
+			goto out;
+		}
+		lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
+	} else {
+		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
+		lbs_deb_cmd("DATA_RATE: setting auto\n");
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
+	if (ret)
+		goto out;
+
+	lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
+
+	/* FIXME: get actual rates FW can do if this command actually returns
+	 * all data rates supported.
+	 */
+	priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
+	lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
+
+out:
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+
+int lbs_cmd_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *cmd)
+{
+
+	lbs_deb_enter(LBS_DEB_CMD);
+	cmd->command = cpu_to_le16(CMD_802_11_RSSI);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
+		sizeof(struct cmd_header));
+	cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
+
+	/* reset Beacon SNR/NF/RSSI values */
+	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
+	priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
+	priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
+	priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
+	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
+	priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+int lbs_ret_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *resp)
+{
+	struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	/* store the non average value */
+	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
+	priv->NF[TYPE_BEACON][TYPE_NOAVG] =
+		get_unaligned_le16(&rssirsp->noisefloor);
+
+	priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
+	priv->NF[TYPE_BEACON][TYPE_AVG] =
+		get_unaligned_le16(&rssirsp->avgnoisefloor);
+
+	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
+	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
+		     priv->NF[TYPE_BEACON][TYPE_NOAVG]);
+
+	priv->RSSI[TYPE_BEACON][TYPE_AVG] =
+	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
+		     priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
+
+	lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
+	       priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
+	       priv->RSSI[TYPE_BEACON][TYPE_AVG]);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+
+int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
+				struct cmd_ds_command *cmd,
+				u16 cmd_action)
+{
+	struct cmd_ds_802_11_beacon_control
+		*bcn_ctrl = &cmd->params.bcn_ctrl;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+	cmd->size =
+	    cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
+			     + sizeof(struct cmd_header));
+	cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
+
+	bcn_ctrl->action = cpu_to_le16(cmd_action);
+	bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
+	bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
+					struct cmd_ds_command *resp)
+{
+	struct cmd_ds_802_11_beacon_control *bcn_ctrl =
+	    &resp->params.bcn_ctrl;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (bcn_ctrl->action == CMD_ACT_GET) {
+		priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
+		priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
+	}
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+
+
 static int lbs_assoc_post(struct lbs_private *priv,
 			  struct cmd_ds_802_11_associate_response *resp)
 {
@@ -226,7 +624,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
 	priv->connect_status = LBS_CONNECTED;
 
 	/* Update current SSID and BSSID */
-	memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
+	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
 	priv->curbssparams.ssid_len = bss->ssid_len;
 	memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
 
@@ -369,12 +767,7 @@ static int lbs_associate(struct lbs_private *priv,
 				   (u16)(pos - (u8 *) &cmd.iebuf));
 
 	/* update curbssparams */
-	priv->curbssparams.channel = bss->phy.ds.channel;
-
-	if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
-		ret = -1;
-		goto done;
-	}
+	priv->channel = bss->phy.ds.channel;
 
 	ret = lbs_cmd_with_response(priv, command, &cmd);
 	if (ret == 0) {
@@ -472,7 +865,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
 	memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
 
 	/* Set the new SSID to current SSID */
-	memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
+	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
 	priv->curbssparams.ssid_len = bss->ssid_len;
 
 	netif_carrier_on(priv->dev);
@@ -487,7 +880,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
 	lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
 		     print_ssid(ssid, bss->ssid, bss->ssid_len),
 		     priv->curbssparams.bssid,
-		     priv->curbssparams.channel);
+		     priv->channel);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
@@ -560,7 +953,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
 	lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
 
 	priv->adhoccreate = 0;
-	priv->curbssparams.channel = bss->channel;
+	priv->channel = bss->channel;
 
 	/* Build the join command */
 	memset(&cmd, 0, sizeof(cmd));
@@ -633,11 +1026,6 @@ static int lbs_adhoc_join(struct lbs_private *priv,
 		}
 	}
 
-	if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
-		ret = -1;
-		goto out;
-	}
-
 	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
 	if (ret == 0) {
 		ret = lbs_adhoc_post(priv,
@@ -737,12 +1125,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
 	lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
 	       cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
 
-	if (lbs_create_dnld_countryinfo_11d(priv)) {
-		lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
-		ret = -1;
-		goto out;
-	}
-
 	lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
 		     assoc_req->channel, assoc_req->band);
 
@@ -1099,7 +1481,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
 			/* else send START command */
 			lbs_deb_assoc("SSID not found, creating adhoc network\n");
 			memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
-				IW_ESSID_MAX_SIZE);
+				IEEE80211_MAX_SSID_LEN);
 			assoc_req->bss.ssid_len = assoc_req->ssid_len;
 			lbs_adhoc_start(priv, assoc_req);
 		}
@@ -1185,7 +1567,8 @@ static int assoc_helper_mode(struct lbs_private *priv,
 	}
 
 	priv->mode = assoc_req->mode;
-	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode);
+	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
+		assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1205,7 +1588,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
 		goto done;
 	}
 
-	if (assoc_req->channel == priv->curbssparams.channel)
+	if (assoc_req->channel == priv->channel)
 		goto done;
 
 	if (priv->mesh_dev) {
@@ -1217,7 +1600,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
 	}
 
 	lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
-		      priv->curbssparams.channel, assoc_req->channel);
+		      priv->channel, assoc_req->channel);
 
 	ret = lbs_set_channel(priv, assoc_req->channel);
 	if (ret < 0)
@@ -1232,7 +1615,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
 		goto done;
 	}
 
-	if (assoc_req->channel != priv->curbssparams.channel) {
+	if (assoc_req->channel != priv->channel) {
 		lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
 		              assoc_req->channel);
 		goto restore_mesh;
@@ -1253,7 +1636,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
  restore_mesh:
 	if (priv->mesh_dev)
 		lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				priv->curbssparams.channel);
+				priv->channel);
 
  done:
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1475,7 +1858,7 @@ static int should_stop_adhoc(struct lbs_private *priv,
 	}
 
 	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
-		if (assoc_req->channel != priv->curbssparams.channel)
+		if (assoc_req->channel != priv->channel)
 			return 1;
 	}
 
@@ -1557,7 +1940,7 @@ static int lbs_find_best_network_ssid(struct lbs_private *priv,
 
 	found = lbs_find_best_ssid_in_list(priv, preferred_mode);
 	if (found && (found->ssid_len > 0)) {
-		memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE);
+		memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
 		*out_ssid_len = found->ssid_len;
 		*out_mode = found->mode;
 		ret = 0;
@@ -1775,12 +2158,12 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
 	assoc_req = priv->pending_assoc_req;
 	if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
 		memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
-		       IW_ESSID_MAX_SIZE);
+		       IEEE80211_MAX_SSID_LEN);
 		assoc_req->ssid_len = priv->curbssparams.ssid_len;
 	}
 
 	if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
-		assoc_req->channel = priv->curbssparams.channel;
+		assoc_req->channel = priv->channel;
 
 	if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
 		assoc_req->band = priv->curbssparams.band;
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 6e765e9f91a3..40621b789fc5 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -3,7 +3,126 @@
 #ifndef _LBS_ASSOC_H_
 #define _LBS_ASSOC_H_
 
-#include "dev.h"
+
+#include "defs.h"
+#include "host.h"
+
+
+struct lbs_private;
+
+/*
+ * In theory an IE is limited to the maximum IE length of 255 bytes,
+ * but in practice 64 bytes are enough.
+ */
+#define MAX_WPA_IE_LEN 64
+
+
+
+struct lbs_802_11_security {
+	u8 WPAenabled;
+	u8 WPA2enabled;
+	u8 wep_enabled;
+	u8 auth_mode;
+	u32 key_mgmt;
+};
+
+/** Current Basic Service Set State Structure */
+struct current_bss_params {
+	/** bssid */
+	u8 bssid[ETH_ALEN];
+	/** ssid */
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+
+	/** band */
+	u8 band;
+	/** channel is directly in priv->channel */
+	/** zero-terminated array of supported data rates */
+	u8 rates[MAX_RATES + 1];
+};
+
+/**
+ *  @brief Structure used to store information for each beacon/probe response
+ */
+struct bss_descriptor {
+	u8 bssid[ETH_ALEN];
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+
+	u16 capability;
+	u32 rssi;
+	u32 channel;
+	u16 beaconperiod;
+	__le16 atimwindow;
+
+	/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
+	u8 mode;
+
+	/* zero-terminated array of supported data rates */
+	u8 rates[MAX_RATES + 1];
+
+	unsigned long last_scanned;
+
+	union ieee_phy_param_set phy;
+	union ieee_ss_param_set ss;
+
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	size_t wpa_ie_len;
+	u8 rsn_ie[MAX_WPA_IE_LEN];
+	size_t rsn_ie_len;
+
+	u8 mesh;
+
+	struct list_head list;
+};
+
+/** Association request
+ *
+ * Encapsulates all the options that describe a specific association request
+ * or configuration of the wireless card's radio, mode, and security settings.
+ */
+struct assoc_request {
+#define ASSOC_FLAG_SSID			1
+#define ASSOC_FLAG_CHANNEL		2
+#define ASSOC_FLAG_BAND			3
+#define ASSOC_FLAG_MODE			4
+#define ASSOC_FLAG_BSSID		5
+#define ASSOC_FLAG_WEP_KEYS		6
+#define ASSOC_FLAG_WEP_TX_KEYIDX	7
+#define ASSOC_FLAG_WPA_MCAST_KEY	8
+#define ASSOC_FLAG_WPA_UCAST_KEY	9
+#define ASSOC_FLAG_SECINFO		10
+#define ASSOC_FLAG_WPA_IE		11
+	unsigned long flags;
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+	u8 channel;
+	u8 band;
+	u8 mode;
+	u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
+
+	/** WEP keys */
+	struct enc_key wep_keys[4];
+	u16 wep_tx_keyidx;
+
+	/** WPA keys */
+	struct enc_key wpa_mcast_key;
+	struct enc_key wpa_unicast_key;
+
+	struct lbs_802_11_security secinfo;
+
+	/** WPA Information Elements */
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	u8 wpa_ie_len;
+
+	/* BSS to associate with for infrastructure or Ad-Hoc join */
+	struct bss_descriptor bss;
+};
+
+
+extern u8 lbs_bg_rates[MAX_RATES];
 
 void lbs_association_worker(struct work_struct *work);
 struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
@@ -13,4 +132,24 @@ int lbs_adhoc_stop(struct lbs_private *priv);
 int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
 				 u8 bssid[ETH_ALEN], u16 reason);
 
+int lbs_cmd_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *cmd);
+int lbs_ret_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *resp);
+
+int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
+				struct cmd_ds_command *cmd,
+				u16 cmd_action);
+int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
+					struct cmd_ds_command *resp);
+
+int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
+			   struct assoc_request *assoc);
+
+int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
+			      uint16_t *enable);
+
+int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
+				struct assoc_request *assoc);
+
 #endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
new file mode 100644
index 000000000000..4396dccd12ac
--- /dev/null
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -0,0 +1,198 @@
+/*
+ * Implement cfg80211 ("iw") support.
+ *
+ * Copyright (C) 2009 M&N Solutions GmbH, 61191 Rosbach, Germany
+ * Holger Schurig <hs4233@mail.mn-solutions.de>
+ *
+ */
+
+#include <net/cfg80211.h>
+
+#include "cfg.h"
+#include "cmd.h"
+
+
+#define CHAN2G(_channel, _freq, _flags) {        \
+	.band             = IEEE80211_BAND_2GHZ, \
+	.center_freq      = (_freq),             \
+	.hw_value         = (_channel),          \
+	.flags            = (_flags),            \
+	.max_antenna_gain = 0,                   \
+	.max_power        = 30,                  \
+}
+
+static struct ieee80211_channel lbs_2ghz_channels[] = {
+	CHAN2G(1,  2412, 0),
+	CHAN2G(2,  2417, 0),
+	CHAN2G(3,  2422, 0),
+	CHAN2G(4,  2427, 0),
+	CHAN2G(5,  2432, 0),
+	CHAN2G(6,  2437, 0),
+	CHAN2G(7,  2442, 0),
+	CHAN2G(8,  2447, 0),
+	CHAN2G(9,  2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0),
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+	.bitrate  = (_rate),                  \
+	.hw_value = (_rateid),                \
+	.flags    = (_flags),                 \
+}
+
+
+static struct ieee80211_rate lbs_rates[] = {
+	RATETAB_ENT(10,  0x1,   0),
+	RATETAB_ENT(20,  0x2,   0),
+	RATETAB_ENT(55,  0x4,   0),
+	RATETAB_ENT(110, 0x8,   0),
+	RATETAB_ENT(60,  0x10,  0),
+	RATETAB_ENT(90,  0x20,  0),
+	RATETAB_ENT(120, 0x40,  0),
+	RATETAB_ENT(180, 0x80,  0),
+	RATETAB_ENT(240, 0x100, 0),
+	RATETAB_ENT(360, 0x200, 0),
+	RATETAB_ENT(480, 0x400, 0),
+	RATETAB_ENT(540, 0x800, 0),
+};
+
+static struct ieee80211_supported_band lbs_band_2ghz = {
+	.channels = lbs_2ghz_channels,
+	.n_channels = ARRAY_SIZE(lbs_2ghz_channels),
+	.bitrates = lbs_rates,
+	.n_bitrates = ARRAY_SIZE(lbs_rates),
+};
+
+
+static const u32 cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+};
+
+
+
+static int lbs_cfg_set_channel(struct wiphy *wiphy,
+	struct ieee80211_channel *chan,
+	enum nl80211_channel_type channel_type)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	int ret = -ENOTSUPP;
+
+	lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type);
+
+	if (channel_type != NL80211_CHAN_NO_HT)
+		goto out;
+
+	ret = lbs_set_channel(priv, chan->hw_value);
+
+ out:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+
+static struct cfg80211_ops lbs_cfg80211_ops = {
+	.set_channel = lbs_cfg_set_channel,
+};
+
+
+/*
+ * At this time lbs_private *priv doesn't even exist, so we just allocate
+ * memory and don't initialize the wiphy further. This is postponed until we
+ * can talk to the firmware and happens at registration time in
+ * lbs_cfg_wiphy_register().
+ */
+struct wireless_dev *lbs_cfg_alloc(struct device *dev)
+{
+	int ret = 0;
+	struct wireless_dev *wdev;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+	if (!wdev) {
+		dev_err(dev, "cannot allocate wireless device\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private));
+	if (!wdev->wiphy) {
+		dev_err(dev, "cannot allocate wiphy\n");
+		ret = -ENOMEM;
+		goto err_wiphy_new;
+	}
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+	return wdev;
+
+ err_wiphy_new:
+	kfree(wdev);
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ERR_PTR(ret);
+}
+
+
+/*
+ * This function gets called after lbs_setup_firmware() has determined the
+ * firmware capabilities, so we can set up the wiphy according to our
+ * hardware/firmware.
+ */
+int lbs_cfg_register(struct lbs_private *priv)
+{
+	struct wireless_dev *wdev = priv->wdev;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	wdev->wiphy->max_scan_ssids = 1;
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+	/* TODO: BIT(NL80211_IFTYPE_ADHOC); */
+	wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+	/* TODO: honor priv->regioncode */
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
+
+	/*
+	 * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
+	 * never seen a firmware without WPA
+	 */
+	wdev->wiphy->cipher_suites = cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+	ret = wiphy_register(wdev->wiphy);
+	if (ret < 0)
+		lbs_pr_err("cannot register wiphy device\n");
+
+	ret = register_netdev(priv->dev);
+	if (ret)
+		lbs_pr_err("cannot register network device\n");
+
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+void lbs_cfg_free(struct lbs_private *priv)
+{
+	struct wireless_dev *wdev = priv->wdev;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	if (!wdev)
+		return;
+
+	if (wdev->wiphy) {
+		wiphy_unregister(wdev->wiphy);
+		wiphy_free(wdev->wiphy);
+	}
+	kfree(wdev);
+}
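+
+/*
+ * Typical lifecycle (sketch based on the functions above): the interface
+ * driver allocates the wireless_dev with lbs_cfg_alloc() at probe time,
+ * calls lbs_cfg_register() once lbs_setup_firmware() has reported the
+ * firmware capabilities, and releases everything with lbs_cfg_free() on
+ * teardown.
+ */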
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
new file mode 100644
index 000000000000..e09a193a34d6
--- /dev/null
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -0,0 +1,16 @@
+#ifndef __LBS_CFG80211_H__
+#define __LBS_CFG80211_H__
+
+#include "dev.h"
+
+struct wireless_dev *lbs_cfg_alloc(struct device *dev);
+int lbs_cfg_register(struct lbs_private *priv);
+void lbs_cfg_free(struct lbs_private *priv);
+
+int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
+	u8 ssid_len);
+int lbs_scan_networks(struct lbs_private *priv, int full_scan);
+void lbs_cfg_scan_worker(struct work_struct *work);
+
+
+#endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 0a324dcd264c..b9b371bfa30f 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,21 +3,20 @@
   * It prepares command and sends it to firmware when it is ready.
   */
 
-#include <net/iw_handler.h>
-#include <net/lib80211.h>
 #include <linux/kfifo.h>
 #include <linux/sched.h>
+
 #include "host.h"
-#include "hostcmd.h"
 #include "decl.h"
 #include "defs.h"
 #include "dev.h"
 #include "assoc.h"
 #include "wext.h"
+#include "scan.h"
 #include "cmd.h"
 
-static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
 
+static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
 
 /**
  *  @brief Simple callback that copies response back into command
@@ -77,6 +76,30 @@ static u8 is_command_allowed_in_ps(u16 cmd)
 }
 
 /**
+ *  @brief This function checks if the command is allowed.
+ *
+ *  @param priv         A pointer to lbs_private structure
+ *  @return             allowed or not allowed.
+ */
+
+static int lbs_is_cmd_allowed(struct lbs_private *priv)
+{
+	int ret = 1;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (!priv->is_auto_deep_sleep_enabled) {
+		if (priv->is_deep_sleep) {
+			lbs_deb_cmd("command not allowed in deep sleep\n");
+			ret = 0;
+		}
+	}
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return ret;
+}
+
+/**
  *  @brief Updates the hardware details like MAC address and regulatory region
  *
  *  @param priv    	A pointer to struct lbs_private structure
@@ -169,11 +192,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
 		goto out;
 	}
 
-	if (lbs_set_universaltable(priv, 0)) {
-		ret = -1;
-		goto out;
-	}
-
 out:
 	lbs_deb_leave(LBS_DEB_CMD);
 	return ret;
@@ -222,7 +240,7 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
 
 	cmd->command = cpu_to_le16(CMD_802_11_PS_MODE);
 	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) +
-				S_DS_GEN);
+				sizeof(struct cmd_header));
 	psm->action = cpu_to_le16(cmd_action);
 	psm->multipledtim = 0;
 	switch (cmd_action) {
@@ -251,33 +269,6 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
 	return 0;
 }
 
-int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
-				      uint16_t cmd_action, uint16_t *timeout)
-{
-	struct cmd_ds_802_11_inactivity_timeout cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.hdr.command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_SET)
-		cmd.timeout = cpu_to_le16(*timeout);
-	else
-		cmd.timeout = 0;
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_INACTIVITY_TIMEOUT, &cmd);
-
-	if (!ret)
-		*timeout = le16_to_cpu(cmd.timeout);
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return 0;
-}
-
 int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
 				struct sleep_params *sp)
 {
@@ -320,190 +311,53 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
 	return 0;
 }
 
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
-			   struct assoc_request *assoc)
+static int lbs_wait_for_ds_awake(struct lbs_private *priv)
 {
-	struct cmd_ds_802_11_set_wep cmd;
 	int ret = 0;
 
 	lbs_deb_enter(LBS_DEB_CMD);
 
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_ADD) {
-		int i;
-
-		/* default tx key index */
-		cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
-					   CMD_WEP_KEY_INDEX_MASK);
-
-		/* Copy key types and material to host command structure */
-		for (i = 0; i < 4; i++) {
-			struct enc_key *pkey = &assoc->wep_keys[i];
-
-			switch (pkey->len) {
-			case KEY_LEN_WEP_40:
-				cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
-				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
-				lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
-				break;
-			case KEY_LEN_WEP_104:
-				cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
-				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
-				lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
-				break;
-			case 0:
-				break;
-			default:
-				lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
-					    i, pkey->len);
-				ret = -1;
-				goto done;
-				break;
-			}
+	if (priv->is_deep_sleep) {
+		if (!wait_event_interruptible_timeout(priv->ds_awake_q,
+					!priv->is_deep_sleep, (10 * HZ))) {
+			lbs_pr_err("ds_awake_q: timer expired\n");
+			ret = -1;
 		}
-	} else if (cmd_action == CMD_ACT_REMOVE) {
-		/* ACT_REMOVE clears _all_ WEP keys */
-
-		/* default tx key index */
-		cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
-					   CMD_WEP_KEY_INDEX_MASK);
-		lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
-done:
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
-			      uint16_t *enable)
-{
-	struct cmd_ds_802_11_enable_rsn cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_GET)
-		cmd.enable = 0;
-	else {
-		if (*enable)
-			cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
-		else
-			cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
-		lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
 	}
 
-	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
-	if (!ret && cmd_action == CMD_ACT_GET)
-		*enable = le16_to_cpu(cmd.enable);
-
 	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
 	return ret;
 }
 
-static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
-                            struct enc_key *key)
+int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
 {
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (key->flags & KEY_INFO_WPA_ENABLED)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
-	if (key->flags & KEY_INFO_WPA_UNICAST)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
-	if (key->flags & KEY_INFO_WPA_MCAST)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
-
-	keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
-	keyparam->keytypeid = cpu_to_le16(key->type);
-	keyparam->keylen = cpu_to_le16(key->len);
-	memcpy(keyparam->key, key->key, key->len);
-
-	/* Length field doesn't include the {type,length} header */
-	keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
-	lbs_deb_leave(LBS_DEB_CMD);
-}
-
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
-				struct assoc_request *assoc)
-{
-	struct cmd_ds_802_11_key_material cmd;
-	int ret = 0;
-	int index = 0;
+	int ret = 0;
 
 	lbs_deb_enter(LBS_DEB_CMD);
 
-	cmd.action = cpu_to_le16(cmd_action);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	if (cmd_action == CMD_ACT_GET) {
-		cmd.hdr.size = cpu_to_le16(S_DS_GEN + 2);
-	} else {
-		memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
-
-		if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
-			set_one_wpa_key(&cmd.keyParamSet[index],
-					&assoc->wpa_unicast_key);
-			index++;
-		}
-
-		if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
-			set_one_wpa_key(&cmd.keyParamSet[index],
-					&assoc->wpa_mcast_key);
-			index++;
+	if (deep_sleep) {
+		if (priv->is_deep_sleep != 1) {
+			lbs_deb_cmd("deep sleep: sleep\n");
+			BUG_ON(!priv->enter_deep_sleep);
+			ret = priv->enter_deep_sleep(priv);
+			if (!ret) {
+				netif_stop_queue(priv->dev);
+				netif_carrier_off(priv->dev);
+			}
+		} else {
+			lbs_pr_err("deep sleep: already enabled\n");
 		}
-
-		/* The common header and as many keys as we included */
-		cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
-						    keyParamSet[index]));
-	}
-	ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
-	/* Copy the returned key to driver private data */
-	if (!ret && cmd_action == CMD_ACT_GET) {
-		void *buf_ptr = cmd.keyParamSet;
-		void *resp_end = &(&cmd)[1];
-
-		while (buf_ptr < resp_end) {
-			struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
-			struct enc_key *key;
-			uint16_t param_set_len = le16_to_cpu(keyparam->length);
-			uint16_t key_len = le16_to_cpu(keyparam->keylen);
-			uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
-			uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
-			void *end;
-
-			end = (void *)keyparam + sizeof(keyparam->type)
-				+ sizeof(keyparam->length) + param_set_len;
-
-			/* Make sure we don't access past the end of the IEs */
-			if (end > resp_end)
-				break;
-
-			if (key_flags & KEY_INFO_WPA_UNICAST)
-				key = &priv->wpa_unicast_key;
-			else if (key_flags & KEY_INFO_WPA_MCAST)
-				key = &priv->wpa_mcast_key;
-			else
-				break;
-
-			/* Copy returned key into driver */
-			memset(key, 0, sizeof(struct enc_key));
-			if (key_len > sizeof(key->key))
-				break;
-			key->type = key_type;
-			key->flags = key_flags;
-			key->len = key_len;
-			memcpy(key->key, keyparam->key, key->len);
-
-			buf_ptr = end + 1;
+	} else {
+		if (priv->is_deep_sleep) {
+			lbs_deb_cmd("deep sleep: wakeup\n");
+			BUG_ON(!priv->exit_deep_sleep);
+			ret = priv->exit_deep_sleep(priv);
+			if (!ret) {
+				ret = lbs_wait_for_ds_awake(priv);
+				if (ret)
+					lbs_pr_err("deep sleep: wakeup "
+							"failed\n");
+			}
 		}
 	}
 
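A side note on the wait above: wait_event_interruptible_timeout() returns 0 only when the 10-second timeout expires with is_deep_sleep still set, while a pending signal makes it return -ERESTARTSYS, which the "!" test silently treats as a successful wakeup. The sketch below is illustrative only (not part of this patch) and shows a variant that distinguishes the two outcomes, reusing the ds_awake_q/is_deep_sleep fields this commit introduces:

static int example_wait_for_ds_awake(struct lbs_private *priv)
{
	long left;

	if (!priv->is_deep_sleep)
		return 0;

	/* >0: is_deep_sleep was cleared (jiffies remaining), 0: timed out,
	 * <0 (-ERESTARTSYS): interrupted by a signal */
	left = wait_event_interruptible_timeout(priv->ds_awake_q,
						!priv->is_deep_sleep, 10 * HZ);
	if (left == 0)
		return -ETIMEDOUT;
	return left < 0 ? left : 0;
}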
@@ -535,7 +389,7 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
 	switch (oid) {
 	case SNMP_MIB_OID_BSS_TYPE:
 		cmd.bufsize = cpu_to_le16(sizeof(u8));
-		cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1;
+		cmd.value[0] = val;
 		break;
 	case SNMP_MIB_OID_11D_ENABLE:
 	case SNMP_MIB_OID_FRAG_THRESHOLD:
@@ -588,13 +442,7 @@ int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
 
 	switch (le16_to_cpu(cmd.bufsize)) {
 	case sizeof(u8):
-		if (oid == SNMP_MIB_OID_BSS_TYPE) {
-			if (cmd.value[0] == 2)
-				*out_val = IW_MODE_ADHOC;
-			else
-				*out_val = IW_MODE_INFRA;
-		} else
-			*out_val = cmd.value[0];
+		*out_val = cmd.value[0];
 		break;
 	case sizeof(u16):
 		*out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
@@ -681,7 +529,7 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
 	cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE);
 	cmd->size =
 	    cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) +
-			     S_DS_GEN);
+			     sizeof(struct cmd_header));
 
 	monitor->action = cpu_to_le16(cmd_action);
 	if (cmd_action == CMD_ACT_SET) {
@@ -692,111 +540,6 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
 	return 0;
 }
 
-static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
-{
-/*		Bit  	Rate
-*		15:13 Reserved
-*		12    54 Mbps
-*		11    48 Mbps
-*		10    36 Mbps
-*		9     24 Mbps
-*		8     18 Mbps
-*		7     12 Mbps
-*		6     9 Mbps
-*		5     6 Mbps
-*		4     Reserved
-*		3     11 Mbps
-*		2     5.5 Mbps
-*		1     2 Mbps
-*		0     1 Mbps
-**/
-
-	uint16_t ratemask;
-	int i = lbs_data_rate_to_fw_index(rate);
-	if (lower_rates_ok)
-		ratemask = (0x1fef >> (12 - i));
-	else
-		ratemask = (1 << i);
-	return cpu_to_le16(ratemask);
-}
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
-				      uint16_t cmd_action)
-{
-	struct cmd_ds_802_11_rate_adapt_rateset cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (!priv->cur_rate && !priv->enablehwauto)
-		return -EINVAL;
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
-	cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
-	ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
-	if (!ret && cmd_action == CMD_ACT_GET) {
-		priv->ratebitmap = le16_to_cpu(cmd.bitmap);
-		priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
-	}
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
-
-/**
- *  @brief Set the data rate
- *
- *  @param priv    	A pointer to struct lbs_private structure
- *  @param rate  	The desired data rate, or 0 to clear a locked rate
- *
- *  @return 	   	0 on success, error on failure
- */
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
-{
-	struct cmd_ds_802_11_data_rate cmd;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	if (rate > 0) {
-		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
-		cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
-		if (cmd.rates[0] == 0) {
-			lbs_deb_cmd("DATA_RATE: invalid requested rate of"
-			            " 0x%02X\n", rate);
-			ret = 0;
-			goto out;
-		}
-		lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
-	} else {
-		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
-		lbs_deb_cmd("DATA_RATE: setting auto\n");
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
-	if (ret)
-		goto out;
-
-	lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd));
-
-	/* FIXME: get actual rates FW can do if this command actually returns
-	 * all data rates supported.
-	 */
-	priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
-	lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
 /**
  *  @brief Get the radio channel
  *
@@ -804,7 +547,7 @@ out:
  *
  *  @return 	   	The channel on success, error on failure
  */
-int lbs_get_channel(struct lbs_private *priv)
+static int lbs_get_channel(struct lbs_private *priv)
 {
 	struct cmd_ds_802_11_rf_channel cmd;
 	int ret = 0;
@@ -836,7 +579,7 @@ int lbs_update_channel(struct lbs_private *priv)
 
 	ret = lbs_get_channel(priv);
 	if (ret > 0) {
-		priv->curbssparams.channel = ret;
+		priv->channel = ret;
 		ret = 0;
 	}
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -855,7 +598,7 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
 {
 	struct cmd_ds_802_11_rf_channel cmd;
 #ifdef DEBUG
-	u8 old_channel = priv->curbssparams.channel;
+	u8 old_channel = priv->channel;
 #endif
 	int ret = 0;
 
@@ -870,36 +613,15 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
 	if (ret)
 		goto out;
 
-	priv->curbssparams.channel = (uint8_t) le16_to_cpu(cmd.channel);
+	priv->channel = (uint8_t) le16_to_cpu(cmd.channel);
 	lbs_deb_cmd("channel switch from %d to %d\n", old_channel,
-		priv->curbssparams.channel);
+		priv->channel);
 
 out:
 	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
 	return ret;
 }
 
-static int lbs_cmd_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *cmd)
-{
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	cmd->command = cpu_to_le16(CMD_802_11_RSSI);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN);
-	cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
-
-	/* reset Beacon SNR/NF/RSSI values */
-	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
-	priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
-	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
 static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
 			       u8 cmd_action, void *pdata_buf)
 {
@@ -916,7 +638,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
 
 			cmdptr->size =
 			    cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access)
-					+ S_DS_GEN);
+					+ sizeof(struct cmd_header));
 			macreg =
 			    (struct cmd_ds_mac_reg_access *)&cmdptr->params.
 			    macreg;
@@ -935,7 +657,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
 			cmdptr->size =
 			    cpu_to_le16(sizeof
 					     (struct cmd_ds_bbp_reg_access)
-					     + S_DS_GEN);
+					     + sizeof(struct cmd_header));
 			bbpreg =
 			    (struct cmd_ds_bbp_reg_access *)&cmdptr->params.
 			    bbpreg;
@@ -954,7 +676,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
 			cmdptr->size =
 			    cpu_to_le16(sizeof
 					     (struct cmd_ds_rf_reg_access) +
-					     S_DS_GEN);
+					     sizeof(struct cmd_header));
 			rfreg =
 			    (struct cmd_ds_rf_reg_access *)&cmdptr->params.
 			    rfreg;
@@ -974,192 +696,6 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
 	return 0;
 }
 
-static int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
-			       u16 cmd_action, void *pdata_buf)
-{
-	struct cmd_ds_bt_access *bt_access = &cmd->params.bt;
-	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
-
-	cmd->command = cpu_to_le16(CMD_BT_ACCESS);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN);
-	cmd->result = 0;
-	bt_access->action = cpu_to_le16(cmd_action);
-
-	switch (cmd_action) {
-	case CMD_ACT_BT_ACCESS_ADD:
-		memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN);
-		lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr", bt_access->addr1, 6);
-		break;
-	case CMD_ACT_BT_ACCESS_DEL:
-		memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN);
-		lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr", bt_access->addr1, 6);
-		break;
-	case CMD_ACT_BT_ACCESS_LIST:
-		bt_access->id = cpu_to_le32(*(u32 *) pdata_buf);
-		break;
-	case CMD_ACT_BT_ACCESS_RESET:
-		break;
-	case CMD_ACT_BT_ACCESS_SET_INVERT:
-		bt_access->id = cpu_to_le32(*(u32 *) pdata_buf);
-		break;
-	case CMD_ACT_BT_ACCESS_GET_INVERT:
-		break;
-	default:
-		break;
-	}
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-static int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
-			       u16 cmd_action, void *pdata_buf)
-{
-	struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt;
-	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
-
-	cmd->command = cpu_to_le16(CMD_FWT_ACCESS);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN);
-	cmd->result = 0;
-
-	if (pdata_buf)
-		memcpy(fwt_access, pdata_buf, sizeof(*fwt_access));
-	else
-		memset(fwt_access, 0, sizeof(*fwt_access));
-
-	fwt_access->action = cpu_to_le16(cmd_action);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
-		    struct cmd_ds_mesh_access *cmd)
-{
-	int ret;
-
-	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
-
-	cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS);
-	cmd->hdr.size = cpu_to_le16(sizeof(*cmd));
-	cmd->hdr.result = 0;
-
-	cmd->action = cpu_to_le16(cmd_action);
-
-	ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return ret;
-}
-
-static int __lbs_mesh_config_send(struct lbs_private *priv,
-				  struct cmd_ds_mesh_config *cmd,
-				  uint16_t action, uint16_t type)
-{
-	int ret;
-	u16 command = CMD_MESH_CONFIG_OLD;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	/*
-	 * Command id is 0xac for v10 FW along with mesh interface
-	 * id in bits 14-13-12.
-	 */
-	if (priv->mesh_fw_ver == MESH_FW_NEW)
-		command = CMD_MESH_CONFIG |
-			  (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
-
-	cmd->hdr.command = cpu_to_le16(command);
-	cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
-	cmd->hdr.result = 0;
-
-	cmd->type = cpu_to_le16(type);
-	cmd->action = cpu_to_le16(action);
-
-	ret = lbs_cmd_with_response(priv, command, cmd);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return ret;
-}
-
-int lbs_mesh_config_send(struct lbs_private *priv,
-			 struct cmd_ds_mesh_config *cmd,
-			 uint16_t action, uint16_t type)
-{
-	int ret;
-
-	if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
-		return -EOPNOTSUPP;
-
-	ret = __lbs_mesh_config_send(priv, cmd, action, type);
-	return ret;
-}
-
-/* This function is the CMD_MESH_CONFIG legacy function.  It only handles the
- * START and STOP actions.  The extended actions supported by CMD_MESH_CONFIG
- * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
- * lbs_mesh_config_send.
- */
-int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
-{
-	struct cmd_ds_mesh_config cmd;
-	struct mrvl_meshie *ie;
-	DECLARE_SSID_BUF(ssid);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.channel = cpu_to_le16(chan);
-	ie = (struct mrvl_meshie *)cmd.data;
-
-	switch (action) {
-	case CMD_ACT_MESH_CONFIG_START:
-		ie->id = WLAN_EID_GENERIC;
-		ie->val.oui[0] = 0x00;
-		ie->val.oui[1] = 0x50;
-		ie->val.oui[2] = 0x43;
-		ie->val.type = MARVELL_MESH_IE_TYPE;
-		ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
-		ie->val.version = MARVELL_MESH_IE_VERSION;
-		ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
-		ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
-		ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
-		ie->val.mesh_id_len = priv->mesh_ssid_len;
-		memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
-		ie->len = sizeof(struct mrvl_meshie_val) -
-			IW_ESSID_MAX_SIZE + priv->mesh_ssid_len;
-		cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
-		break;
-	case CMD_ACT_MESH_CONFIG_STOP:
-		break;
-	default:
-		return -1;
-	}
-	lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
-		    action, priv->mesh_tlv, chan,
-		    print_ssid(ssid, priv->mesh_ssid, priv->mesh_ssid_len));
-
-	return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
-}
-
-static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
-				struct cmd_ds_command *cmd,
-				u16 cmd_action)
-{
-	struct cmd_ds_802_11_beacon_control
-		*bcn_ctrl = &cmd->params.bcn_ctrl;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	cmd->size =
-	    cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
-			     + S_DS_GEN);
-	cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
-
-	bcn_ctrl->action = cpu_to_le16(cmd_action);
-	bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
-	bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
 static void lbs_queue_cmd(struct lbs_private *priv,
 			  struct cmd_ctrl_node *cmdnode)
 {
@@ -1243,8 +779,17 @@ static void lbs_submit_command(struct lbs_private *priv,
 		timeo = HZ/4;
 	}
 
-	/* Setup the timer after transmit command */
-	mod_timer(&priv->command_timer, jiffies + timeo);
+	if (command == CMD_802_11_DEEP_SLEEP) {
+		if (priv->is_auto_deep_sleep_enabled) {
+			priv->wakeup_dev_required = 1;
+			priv->dnld_sent = 0;
+		}
+		priv->is_deep_sleep = 1;
+		lbs_complete_command(priv, cmdnode, 0);
+	} else {
+		/* Setup the timer after transmit command */
+		mod_timer(&priv->command_timer, jiffies + timeo);
+	}
 
 	lbs_deb_leave(LBS_DEB_HOST);
 }
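The special case above exists because the firmware never answers CMD_802_11_DEEP_SLEEP: the command node is completed on the spot instead of arming the response timer, which would otherwise expire later and needlessly log a timeout, retry, or reset the card. The matching hunk in lbs_prepare_and_send_command() further down builds the command as a bare struct cmd_header with no payload. Condensed, the submit-path pattern reads (an illustrative restatement of the diff, not additional patch content):

	if (command == CMD_802_11_DEEP_SLEEP) {
		/* no response will arrive: finish the node immediately */
		lbs_complete_command(priv, cmdnode, 0);
	} else {
		/* normal case: wait for the response, guarded by a timer */
		mod_timer(&priv->command_timer, jiffies + timeo);
	}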
@@ -1391,6 +936,11 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
 		goto done;
 	}
 
+	if (!lbs_is_cmd_allowed(priv)) {
+		ret = -EBUSY;
+		goto done;
+	}
+
 	cmdnode = lbs_get_cmd_ctrl_node(priv);
 
 	if (cmdnode == NULL) {
@@ -1441,7 +991,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
 
 		cmdptr->command = cpu_to_le16(cmd_no);
 		cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) +
-					   S_DS_GEN);
+					   sizeof(struct cmd_header));
 
 		memmove(&cmdptr->params.afc,
 			pdata_buf, sizeof(struct cmd_ds_802_11_afc));
@@ -1449,45 +999,17 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
 		ret = 0;
 		goto done;
 
-	case CMD_802_11D_DOMAIN_INFO:
-		ret = lbs_cmd_802_11d_domain_info(priv, cmdptr,
-						   cmd_no, cmd_action);
-		break;
-
 	case CMD_802_11_TPC_CFG:
 		cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
 		cmdptr->size =
 		    cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) +
-				     S_DS_GEN);
+				     sizeof(struct cmd_header));
 
 		memmove(&cmdptr->params.tpccfg,
 			pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg));
 
 		ret = 0;
 		break;
-	case CMD_802_11_LED_GPIO_CTRL:
-		{
-			struct mrvl_ie_ledgpio *gpio =
-			    (struct mrvl_ie_ledgpio*)
-			    cmdptr->params.ledgpio.data;
-
-			memmove(&cmdptr->params.ledgpio,
-				pdata_buf,
-				sizeof(struct cmd_ds_802_11_led_ctrl));
-
-			cmdptr->command =
-			    cpu_to_le16(CMD_802_11_LED_GPIO_CTRL);
-
-#define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8
-			cmdptr->size =
-			    cpu_to_le16(le16_to_cpu(gpio->header.len)
-				+ S_DS_GEN
-				+ ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN);
-			gpio->header.len = gpio->header.len;
-
-			ret = 0;
-			break;
-		}
 
 	case CMD_BT_ACCESS:
 		ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
@@ -1497,15 +1019,13 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
 		ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
 		break;
 
-	case CMD_GET_TSF:
-		cmdptr->command = cpu_to_le16(CMD_GET_TSF);
-		cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) +
-					   S_DS_GEN);
-		ret = 0;
-		break;
 	case CMD_802_11_BEACON_CTRL:
 		ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
 		break;
+	case CMD_802_11_DEEP_SLEEP:
+		cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP);
+		cmdptr->size = cpu_to_le16(sizeof(struct cmd_header));
+		break;
 	default:
 		lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
 		ret = -1;
@@ -1823,30 +1343,6 @@ done:
 	return ret;
 }
 
-void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
-{
-	union iwreq_data iwrq;
-	u8 buf[50];
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	memset(&iwrq, 0, sizeof(union iwreq_data));
-	memset(buf, 0, sizeof(buf));
-
-	snprintf(buf, sizeof(buf) - 1, "%s", str);
-
-	iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
-
-	/* Send Event to upper layer */
-	lbs_deb_wext("event indication string %s\n", (char *)buf);
-	lbs_deb_wext("event indication length %d\n", iwrq.data.length);
-	lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
-
-	wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-}
-
 static void lbs_send_confirmsleep(struct lbs_private *priv)
 {
 	unsigned long flags;
@@ -2024,7 +1520,7 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
 }
 
 
-static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
+struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
 	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
 	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
 	unsigned long callback_arg)
@@ -2039,6 +1535,11 @@ static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
 		goto done;
 	}
 
+	if (!lbs_is_cmd_allowed(priv)) {
+		cmdnode = ERR_PTR(-EBUSY);
+		goto done;
+	}
+
 	cmdnode = lbs_get_cmd_ctrl_node(priv);
 	if (cmdnode == NULL) {
 		lbs_deb_host("PREP_CMD: cmdnode is NULL\n");
@@ -2117,5 +1618,3 @@ done:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__lbs_cmd);
-
-
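Two smaller cmd.c changes are easy to miss in the churn: __lbs_cmd_async() loses its static qualifier (it is now declared in cmd.h so other parts of the driver can queue asynchronous commands), and both command-submission paths now bail out with -EBUSY when lbs_is_cmd_allowed() refuses. That helper is not part of this excerpt; given the deep-sleep state added elsewhere in the patch, a plausible shape would be the following hypothetical sketch, not the actual implementation:

static int example_is_cmd_allowed(struct lbs_private *priv)
{
	/* refuse host commands while the firmware is in deep sleep and no
	 * wakeup has been arranged yet */
	if (priv->is_deep_sleep && !priv->wakeup_dev_required)
		return 0;
	return 1;
}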
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 392e578ca095..2862748aef70 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -3,11 +3,30 @@
 #ifndef _LBS_CMD_H_
 #define _LBS_CMD_H_
 
-#include "hostcmd.h"
+#include "host.h"
 #include "dev.h"
 
+
+/* Command & response transfer between host and card */
+
+struct cmd_ctrl_node {
+	struct list_head list;
+	int result;
+	/* command response */
+	int (*callback)(struct lbs_private *,
+			unsigned long,
+			struct cmd_header *);
+	unsigned long callback_arg;
+	/* command data */
+	struct cmd_header *cmdbuf;
+	/* wait queue */
+	u16 cmdwaitqwoken;
+	wait_queue_head_t cmdwait_q;
+};
+
+
 /* lbs_cmd() infers the size of the buffer to copy data back into, from
-   the size of the target of the pointer. Since the command to be sent 
+   the size of the target of the pointer. Since the command to be sent
    may often be smaller, that size is set in cmd->size by the caller.*/
 #define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg)	({		\
 	uint16_t __sz = le16_to_cpu((cmd)->hdr.size);		\
@@ -18,6 +37,11 @@
 #define lbs_cmd_with_response(priv, cmdnr, cmd)	\
 	lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
 
+int lbs_prepare_and_send_command(struct lbs_private *priv,
+	u16 cmd_no,
+	u16 cmd_action,
+	u16 wait_option, u32 cmd_oid, void *pdata_buf);
+
 void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
 	struct cmd_header *in_cmd, int in_cmd_size);
 
@@ -26,62 +50,93 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
 	      int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
 	      unsigned long callback_arg);
 
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
-		int8_t p1, int8_t p2);
+struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
+	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
+	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
+	unsigned long callback_arg);
 
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
-		int8_t p2, int usesnr);
+int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
+		     struct cmd_header *resp);
 
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
-		int8_t p1, int8_t p2);
+int lbs_allocate_cmd_buffer(struct lbs_private *priv);
+int lbs_free_cmd_buffer(struct lbs_private *priv);
 
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
-		int8_t p2, int usesnr);
+int lbs_execute_next_command(struct lbs_private *priv);
+void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
+			  int result);
+int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
 
-int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
-		     struct cmd_header *resp);
 
-int lbs_update_hw_spec(struct lbs_private *priv);
+/* From cmdresp.c */
 
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
-		    struct cmd_ds_mesh_access *cmd);
+void lbs_mac_event_disconnected(struct lbs_private *priv);
 
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
 
-int lbs_get_channel(struct lbs_private *priv);
+
+/* Events */
+
+int lbs_process_event(struct lbs_private *priv, u32 event);
+
+
+/* Actual commands */
+
+int lbs_update_hw_spec(struct lbs_private *priv);
+
 int lbs_set_channel(struct lbs_private *priv, u8 channel);
 
-int lbs_mesh_config_send(struct lbs_private *priv,
-			 struct cmd_ds_mesh_config *cmd,
-			 uint16_t action, uint16_t type);
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+int lbs_update_channel(struct lbs_private *priv);
 
 int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
 		struct wol_config *p_wol_config);
-int lbs_suspend(struct lbs_private *priv);
-void lbs_resume(struct lbs_private *priv);
 
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
-				      uint16_t cmd_action);
-int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
-				      uint16_t cmd_action, uint16_t *timeout);
 int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
 				struct sleep_params *sp);
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
-			   struct assoc_request *assoc);
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
-			      uint16_t *enable);
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
-				struct assoc_request *assoc);
 
-int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
-		     s16 *maxlevel);
-int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
+void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
+
+void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
+
+void lbs_ps_confirm_sleep(struct lbs_private *priv);
 
 int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
 
+void lbs_set_mac_control(struct lbs_private *priv);
+
+int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
+		     s16 *maxlevel);
+
 int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
 
 int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
 
+
+/* Mesh related */
+
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+		    struct cmd_ds_mesh_access *cmd);
+
+int lbs_mesh_config_send(struct lbs_private *priv,
+			 struct cmd_ds_mesh_config *cmd,
+			 uint16_t action, uint16_t type);
+
+int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+
+
+/* Commands only used in wext.c, assoc. and scan.c */
+
+int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
+		int8_t p1, int8_t p2);
+
+int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
+		int8_t p2, int usesnr);
+
+int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
+
+int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
+				      uint16_t cmd_action);
+
+int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
+
+int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
+
 #endif /* _LBS_CMD_H */
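cmd.h now pulls in host.h instead of the old hostcmd.h and carries struct cmd_ctrl_node plus the full set of command prototypes itself. The calling convention of lbs_cmd()/lbs_cmd_with_response() is unchanged and can be read off the cmd.c functions above: the caller fills hdr.size with the full command length, and lbs_cmd_copyback() copies the firmware reply back into the same buffer. A generic sketch, with a made-up command structure and command id standing in for the real ones from host.h:

	struct cmd_ds_example {			/* hypothetical, for illustration only */
		struct cmd_header hdr;
		__le16 action;
		__le16 value;
	} cmd;
	u16 val = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));	/* full length, header included */
	cmd.action = cpu_to_le16(CMD_ACT_GET);
	if (!lbs_cmd_with_response(priv, CMD_EXAMPLE, &cmd))	/* CMD_EXAMPLE: placeholder */
		val = le16_to_cpu(cmd.value);		/* reply was copied back in place */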
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 23f684337fdd..21d57690c20a 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -11,6 +11,7 @@
 
 #include "host.h"
 #include "decl.h"
+#include "cmd.h"
 #include "defs.h"
 #include "dev.h"
 #include "assoc.h"
@@ -26,23 +27,17 @@
  */
 void lbs_mac_event_disconnected(struct lbs_private *priv)
 {
-	union iwreq_data wrqu;
-
 	if (priv->connect_status != LBS_CONNECTED)
 		return;
 
 	lbs_deb_enter(LBS_DEB_ASSOC);
 
-	memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
 	/*
 	 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms.
 	 * It causes problem in the Supplicant
 	 */
-
 	msleep_interruptible(1000);
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+	lbs_send_disconnect_notification(priv);
 
 	/* report disconnect to upper layer */
 	netif_stop_queue(priv->dev);
@@ -67,7 +62,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
 	 * no longer valid.
 	 */
 	memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
-	memset(&priv->curbssparams.ssid, 0, IW_ESSID_MAX_SIZE);
+	memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
 	priv->curbssparams.ssid_len = 0;
 
 	if (priv->psstate != PS_STATE_FULL_POWER) {
@@ -78,32 +73,6 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
 	lbs_deb_leave(LBS_DEB_ASSOC);
 }
 
-/**
- *  @brief This function handles MIC failure event.
- *
- *  @param priv    A pointer to struct lbs_private structure
- *  @para  event   the event id
- *  @return 	   n/a
- */
-static void handle_mic_failureevent(struct lbs_private *priv, u32 event)
-{
-	char buf[50];
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	memset(buf, 0, sizeof(buf));
-
-	sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
-
-	if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) {
-		strcat(buf, "unicast ");
-	} else {
-		strcat(buf, "multicast ");
-	}
-
-	lbs_send_iwevcustom_event(priv, buf);
-	lbs_deb_leave(LBS_DEB_CMD);
-}
-
 static int lbs_ret_reg_access(struct lbs_private *priv,
 			       u16 type, struct cmd_ds_command *resp)
 {
@@ -147,53 +116,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
 	return ret;
 }
 
-static int lbs_ret_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	/* store the non average value */
-	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
-	priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
-
-	priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
-	priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
-
-	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
-		     priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-
-	priv->RSSI[TYPE_BEACON][TYPE_AVG] =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
-		     priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
-
-	lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
-	       priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
-	       priv->RSSI[TYPE_BEACON][TYPE_AVG]);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
-					struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_beacon_control *bcn_ctrl =
-	    &resp->params.bcn_ctrl;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (bcn_ctrl->action == CMD_ACT_GET) {
-		priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
-		priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
-	}
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	return 0;
-}
-
 static inline int handle_cmd_response(struct lbs_private *priv,
 				      struct cmd_header *cmd_response)
 {
@@ -227,29 +149,13 @@ static inline int handle_cmd_response(struct lbs_private *priv,
 		ret = lbs_ret_802_11_rssi(priv, resp);
 		break;
 
-	case CMD_RET(CMD_802_11D_DOMAIN_INFO):
-		ret = lbs_ret_802_11d_domain_info(resp);
-		break;
-
 	case CMD_RET(CMD_802_11_TPC_CFG):
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
 			sizeof(struct cmd_ds_802_11_tpc_cfg));
 		spin_unlock_irqrestore(&priv->driver_lock, flags);
 		break;
-	case CMD_RET(CMD_802_11_LED_GPIO_CTRL):
-		spin_lock_irqsave(&priv->driver_lock, flags);
-		memmove((void *)priv->cur_cmd->callback_arg, &resp->params.ledgpio,
-			sizeof(struct cmd_ds_802_11_led_ctrl));
-		spin_unlock_irqrestore(&priv->driver_lock, flags);
-		break;
 
-	case CMD_RET(CMD_GET_TSF):
-		spin_lock_irqsave(&priv->driver_lock, flags);
-		memcpy((void *)priv->cur_cmd->callback_arg,
-		       &resp->params.gettsf.tsfvalue, sizeof(u64));
-		spin_unlock_irqrestore(&priv->driver_lock, flags);
-		break;
 	case CMD_RET(CMD_BT_ACCESS):
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		if (priv->cur_cmd->callback_arg)
@@ -505,9 +411,21 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
 
 	case MACREG_INT_CODE_HOST_AWAKE:
 		lbs_deb_cmd("EVENT: host awake\n");
+		if (priv->reset_deep_sleep_wakeup)
+			priv->reset_deep_sleep_wakeup(priv);
+		priv->is_deep_sleep = 0;
 		lbs_send_confirmwake(priv);
 		break;
 
+	case MACREG_INT_CODE_DEEP_SLEEP_AWAKE:
+		if (priv->reset_deep_sleep_wakeup)
+			priv->reset_deep_sleep_wakeup(priv);
+		lbs_deb_cmd("EVENT: ds awake\n");
+		priv->is_deep_sleep = 0;
+		priv->wakeup_dev_required = 0;
+		wake_up_interruptible(&priv->ds_awake_q);
+		break;
+
 	case MACREG_INT_CODE_PS_AWAKE:
 		lbs_deb_cmd("EVENT: ps awake\n");
 		/* handle unexpected PS AWAKE event */
@@ -533,12 +451,12 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
 
 	case MACREG_INT_CODE_MIC_ERR_UNICAST:
 		lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n");
-		handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_UNICAST);
+		lbs_send_mic_failureevent(priv, event);
 		break;
 
 	case MACREG_INT_CODE_MIC_ERR_MULTICAST:
 		lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n");
-		handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST);
+		lbs_send_mic_failureevent(priv, event);
 		break;
 
 	case MACREG_INT_CODE_MIB_CHANGED:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 893a55ca344a..587b0cb0088d 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -451,10 +451,12 @@ static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
 				CMD_MAC_REG_ACCESS, 0,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
-	pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
+	if (!ret) {
+		pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
 				priv->mac_offset, priv->offsetvalue.value);
 
-	ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+	}
 	free_page(addr);
 	return ret;
 }
@@ -514,7 +516,8 @@ static ssize_t lbs_wrmac_write(struct file *file,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
 
-	res = count;
+	if (!res)
+		res = count;
 out_unlock:
 	free_page(addr);
 	return res;
@@ -539,10 +542,12 @@ static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
 				CMD_BBP_REG_ACCESS, 0,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
-	pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
+	if (!ret) {
+		pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
 				priv->bbp_offset, priv->offsetvalue.value);
 
-	ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+	}
 	free_page(addr);
 
 	return ret;
@@ -603,7 +608,8 @@ static ssize_t lbs_wrbbp_write(struct file *file,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
 
-	res = count;
+	if (!res)
+		res = count;
 out_unlock:
 	free_page(addr);
 	return res;
@@ -628,10 +634,12 @@ static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
 				CMD_RF_REG_ACCESS, 0,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
-	pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
+	if (!ret) {
+		pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
 				priv->rf_offset, priv->offsetvalue.value);
 
-	ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+	}
 	free_page(addr);
 
 	return ret;
@@ -692,7 +700,8 @@ static ssize_t lbs_wrrf_write(struct file *file,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
 
-	res = count;
+	if (!res)
+		res = count;
 out_unlock:
 	free_page(addr);
 	return res;
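All four debugfs hunks fix the same bug: the MAC/BBP/RF register read and write handlers ignored the return value of lbs_prepare_and_send_command(), so a failed register access still reported success to userspace, and the read paths printed whatever stale value was left in priv->offsetvalue. The corrected read-side shape, condensed from the diff above rather than added to it:

	ret = lbs_prepare_and_send_command(priv, CMD_MAC_REG_ACCESS, 0,
					   CMD_OPTION_WAITFORRSP, 0, &offval);
	mdelay(10);
	if (!ret) {
		pos += snprintf(buf + pos, len - pos, "MAC[0x%x] = 0x%08x\n",
				priv->mac_offset, priv->offsetvalue.value);
		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
	}
	/* on failure, the error from the command path reaches userspace as-is */
	free_page(addr);
	return ret;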
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 8b15380ae6e1..709ffcad22ad 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,71 +8,46 @@
 
 #include <linux/netdevice.h>
 
-#include "defs.h"
 
-/** Function Prototype Declaration */
 struct lbs_private;
 struct sk_buff;
 struct net_device;
-struct cmd_ctrl_node;
-struct cmd_ds_command;
 
-void lbs_set_mac_control(struct lbs_private *priv);
 
-void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
-
-int lbs_free_cmd_buffer(struct lbs_private *priv);
-
-int lbs_prepare_and_send_command(struct lbs_private *priv,
-	u16 cmd_no,
-	u16 cmd_action,
-	u16 wait_option, u32 cmd_oid, void *pdata_buf);
+/* ethtool.c */
+extern const struct ethtool_ops lbs_ethtool_ops;
 
-int lbs_allocate_cmd_buffer(struct lbs_private *priv);
-int lbs_execute_next_command(struct lbs_private *priv);
-int lbs_process_event(struct lbs_private *priv, u32 event);
-void lbs_queue_event(struct lbs_private *priv, u32 event);
-void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
 
-u32 lbs_fw_index_to_data_rate(u8 index);
-u8 lbs_data_rate_to_fw_index(u32 rate);
-
-/** The proc fs interface */
-int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
-void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
-			  int result);
+/* tx.c */
+void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
 netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb,
 				struct net_device *dev);
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
 
+/* rx.c */
 int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *);
 
-void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
-void lbs_ps_confirm_sleep(struct lbs_private *priv);
-void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
-
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
-	struct lbs_private *priv,
-	u8 band,
-	u16 channel);
-
-void lbs_mac_event_disconnected(struct lbs_private *priv);
-
-void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
-
-/* persistcfg.c */
-void lbs_persist_config_init(struct net_device *net);
-void lbs_persist_config_remove(struct net_device *net);
 
 /* main.c */
-struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
-	int *cfp_no);
 struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
 void lbs_remove_card(struct lbs_private *priv);
 int lbs_start_card(struct lbs_private *priv);
 void lbs_stop_card(struct lbs_private *priv);
 void lbs_host_to_card_done(struct lbs_private *priv);
 
-int lbs_update_channel(struct lbs_private *priv);
+int lbs_set_mac_address(struct net_device *dev, void *addr);
+void lbs_set_multicast_list(struct net_device *dev);
+
+int lbs_suspend(struct lbs_private *priv);
+void lbs_resume(struct lbs_private *priv);
+
+void lbs_queue_event(struct lbs_private *priv, u32 event);
+void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
+
+int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
+int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
+
+u32 lbs_fw_index_to_data_rate(u8 index);
+u8 lbs_data_rate_to_fw_index(u32 rate);
+
 
 #endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 72f3479a4d70..6b6ea9f7bf5b 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -42,6 +42,7 @@
 #define LBS_DEB_SDIO	0x00400000
 #define LBS_DEB_SYSFS	0x00800000
 #define LBS_DEB_SPI	0x01000000
+#define LBS_DEB_CFG80211 0x02000000
 
 extern unsigned int lbs_debug;
 
@@ -86,6 +87,7 @@ do { if ((lbs_debug & (grp)) == (grp)) \
 #define lbs_deb_sdio(fmt, args...)      LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
 #define lbs_deb_sysfs(fmt, args...)     LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args)
 #define lbs_deb_spi(fmt, args...)       LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args)
+#define lbs_deb_cfg80211(fmt, args...)  LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
 
 #define lbs_pr_info(format, args...) \
 	printk(KERN_INFO DRV_NAME": " format, ## args)
@@ -320,7 +322,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
 extern const char lbs_driver_version[];
 extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE];
 
-extern u8 lbs_bg_rates[MAX_RATES];
 
 /** ENUM definition*/
 /** SNRNF_TYPE */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index d3b69a4b4b5e..6a8d2b291d8c 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -6,75 +6,11 @@
 #ifndef _LBS_DEV_H_
 #define _LBS_DEV_H_
 
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <linux/ethtool.h>
-#include <linux/debugfs.h>
+#include "mesh.h"
+#include "scan.h"
+#include "assoc.h"
 
-#include "defs.h"
-#include "hostcmd.h"
 
-extern const struct ethtool_ops lbs_ethtool_ops;
-
-#define	MAX_BSSID_PER_CHANNEL		16
-
-#define NR_TX_QUEUE			3
-
-/* For the extended Scan */
-#define MAX_EXTENDED_SCAN_BSSID_LIST    MAX_BSSID_PER_CHANNEL * \
-						MRVDRV_MAX_CHANNEL_SIZE + 1
-
-#define	MAX_REGION_CHANNEL_NUM	2
-
-/** Chan-freq-TxPower mapping table*/
-struct chan_freq_power {
-	/** channel Number		*/
-	u16 channel;
-	/** frequency of this channel	*/
-	u32 freq;
-	/** Max allowed Tx power level	*/
-	u16 maxtxpower;
-	/** TRUE:channel unsupported;  FLASE:supported*/
-	u8 unsupported;
-};
-
-/** region-band mapping table*/
-struct region_channel {
-	/** TRUE if this entry is valid		     */
-	u8 valid;
-	/** region code for US, Japan ...	     */
-	u8 region;
-	/** band B/G/A, used for BAND_CONFIG cmd	     */
-	u8 band;
-	/** Actual No. of elements in the array below */
-	u8 nrcfp;
-	/** chan-freq-txpower mapping table*/
-	struct chan_freq_power *CFP;
-};
-
-struct lbs_802_11_security {
-	u8 WPAenabled;
-	u8 WPA2enabled;
-	u8 wep_enabled;
-	u8 auth_mode;
-	u32 key_mgmt;
-};
-
-/** Current Basic Service Set State Structure */
-struct current_bss_params {
-	/** bssid */
-	u8 bssid[ETH_ALEN];
-	/** ssid */
-	u8 ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 ssid_len;
-
-	/** band */
-	u8 band;
-	/** channel */
-	u8 channel;
-	/** zero-terminated array of supported data rates */
-	u8 rates[MAX_RATES + 1];
-};
 
 /** sleep_params */
 struct sleep_params {
@@ -86,109 +22,99 @@ struct sleep_params {
 	uint16_t sp_reserved;
 };
 
-/* Mesh statistics */
-struct lbs_mesh_stats {
-	u32	fwd_bcast_cnt;		/* Fwd: Broadcast counter */
-	u32	fwd_unicast_cnt;	/* Fwd: Unicast counter */
-	u32	fwd_drop_ttl;		/* Fwd: TTL zero */
-	u32	fwd_drop_rbt;		/* Fwd: Recently Broadcasted */
-	u32	fwd_drop_noroute; 	/* Fwd: No route to Destination */
-	u32	fwd_drop_nobuf;		/* Fwd: Run out of internal buffers */
-	u32	drop_blind;		/* Rx:  Dropped by blinding table */
-	u32	tx_failed_cnt;		/* Tx:  Failed transmissions */
-};
 
 /** Private structure for the MV device */
 struct lbs_private {
-	int mesh_open;
-	int mesh_fw_ver;
-	int infra_open;
-	int mesh_autostart_enabled;
 
-	char name[DEV_NAME_LEN];
-
-	void *card;
+	/* Basic networking */
 	struct net_device *dev;
+	u32 connect_status;
+	int infra_open;
+	struct work_struct mcast_work;
+	u32 nr_of_multicastmacaddr;
+	u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
+
+	/* CFG80211 */
+	struct wireless_dev *wdev;
 
+	/* Mesh */
 	struct net_device *mesh_dev; /* Virtual device */
+	u32 mesh_connect_status;
+	struct lbs_mesh_stats mstats;
+	int mesh_open;
+	int mesh_fw_ver;
+	int mesh_autostart_enabled;
+	uint16_t mesh_tlv;
+	u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 mesh_ssid_len;
+	struct work_struct sync_channel;
+
+	/* Monitor mode */
 	struct net_device *rtap_net_dev;
+	u32 monitormode;
 
-	struct iw_statistics wstats;
-	struct lbs_mesh_stats mstats;
+	/* Debugfs */
 	struct dentry *debugfs_dir;
 	struct dentry *debugfs_debug;
 	struct dentry *debugfs_files[6];
-
 	struct dentry *events_dir;
 	struct dentry *debugfs_events_files[6];
-
 	struct dentry *regs_dir;
 	struct dentry *debugfs_regs_files[6];
 
+	/* Hardware debugging */
 	u32 mac_offset;
 	u32 bbp_offset;
 	u32 rf_offset;
+	struct lbs_offset_value offsetvalue;
 
-	/* Download sent:
-	   bit0 1/0=data_sent/data_tx_done,
-	   bit1 1/0=cmd_sent/cmd_tx_done,
-	   all other bits reserved 0 */
-	u8 dnld_sent;
-
-	/** thread to service interrupts */
-	struct task_struct *main_thread;
-	wait_queue_head_t waitq;
-	struct workqueue_struct *work_thread;
-
-	struct work_struct mcast_work;
+	/* Power management */
+	u16 psmode;
+	u32 psstate;
+	u8 needtowakeup;
 
-	/** Scanning */
-	struct delayed_work scan_work;
-	struct delayed_work assoc_work;
-	struct work_struct sync_channel;
-	/* remember which channel was scanned last, != 0 if currently scanning */
-	int scan_channel;
-	u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 scan_ssid_len;
+	/* Deep sleep */
+	int is_deep_sleep;
+	int is_auto_deep_sleep_enabled;
+	int wakeup_dev_required;
+	int is_activity_detected;
+	int auto_deep_sleep_timeout; /* in ms */
+	wait_queue_head_t ds_awake_q;
+	struct timer_list auto_deepsleep_timer;
 
-	/** Hardware access */
+	/* Hardware access */
+	void *card;
+	u8 fw_ready;
+	u8 surpriseremoved;
 	int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
 	void (*reset_card) (struct lbs_private *priv);
+	int (*enter_deep_sleep) (struct lbs_private *priv);
+	int (*exit_deep_sleep) (struct lbs_private *priv);
+	int (*reset_deep_sleep_wakeup) (struct lbs_private *priv);
 
-	/* Wake On LAN */
-	uint32_t wol_criteria;
-	uint8_t wol_gpio;
-	uint8_t wol_gap;
-
-	/** Wlan adapter data structure*/
-	/** STATUS variables */
+	/* Adapter info (from EEPROM) */
 	u32 fwrelease;
 	u32 fwcapinfo;
+	u16 regioncode;
+	u8 current_addr[ETH_ALEN];
 
-	struct mutex lock;
-
-	/* TX packet ready to be sent... */
-	int tx_pending_len;		/* -1 while building packet */
-
-	u8 tx_pending_buf[LBS_UPLD_SIZE];
-	/* protected by hard_start_xmit serialization */
-
-	/** command-related variables */
+	/* Command download */
+	u8 dnld_sent;
+	/* bit0 1/0=data_sent/data_tx_done,
+	   bit1 1/0=cmd_sent/cmd_tx_done,
+	   all other bits reserved 0 */
 	u16 seqnum;
-
 	struct cmd_ctrl_node *cmd_array;
-	/** Current command */
 	struct cmd_ctrl_node *cur_cmd;
-	int cur_cmd_retcode;
-	/** command Queues */
-	/** Free command buffers */
-	struct list_head cmdfreeq;
-	/** Pending command buffers */
-	struct list_head cmdpendingq;
-
+	struct list_head cmdfreeq;    /* free command buffers */
+	struct list_head cmdpendingq; /* pending command buffers */
 	wait_queue_head_t cmd_pending;
+	struct timer_list command_timer;
+	int nr_retries;
+	int cmd_timed_out;
 
 	/* Command responses sent from the hardware to the driver */
+	int cur_cmd_retcode;
 	u8 resp_idx;
 	u8 resp_buf[2][LBS_UPLD_SIZE];
 	u32 resp_len[2];
@@ -196,95 +122,76 @@ struct lbs_private {
 	/* Events sent from hardware to driver */
 	struct kfifo *event_fifo;
 
-	/* nickname */
-	u8 nodename[16];
-
-	/** spin locks */
-	spinlock_t driver_lock;
-
-	/** Timers */
-	struct timer_list command_timer;
-	int nr_retries;
-	int cmd_timed_out;
-
-	/** current ssid/bssid related parameters*/
-	struct current_bss_params curbssparams;
-
-	uint16_t mesh_tlv;
-	u8 mesh_ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 mesh_ssid_len;
-
-	/* IW_MODE_* */
-	u8 mode;
-
-	/* Scan results list */
-	struct list_head network_list;
-	struct list_head network_free_list;
-	struct bss_descriptor *networks;
-
-	u16 beacon_period;
-	u8 beacon_enable;
-	u8 adhoccreate;
-
-	/** capability Info used in Association, start, join */
-	u16 capability;
-
-	/** MAC address information */
-	u8 current_addr[ETH_ALEN];
-	u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
-	u32 nr_of_multicastmacaddr;
+	/** thread to service interrupts */
+	struct task_struct *main_thread;
+	wait_queue_head_t waitq;
+	struct workqueue_struct *work_thread;
 
-	/** 802.11 statistics */
-//	struct cmd_DS_802_11_GET_STAT wlan802_11Stat;
+	/** Encryption stuff */
+	struct lbs_802_11_security secinfo;
+	struct enc_key wpa_mcast_key;
+	struct enc_key wpa_unicast_key;
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	u8 wpa_ie_len;
+	u16 wep_tx_keyidx;
+	struct enc_key wep_keys[4];
 
-	uint16_t enablehwauto;
-	uint16_t ratebitmap;
+	/* Wake On LAN */
+	uint32_t wol_criteria;
+	uint8_t wol_gpio;
+	uint8_t wol_gap;
 
+	/* Transmitting */
+	int tx_pending_len;		/* -1 while building packet */
+	u8 tx_pending_buf[LBS_UPLD_SIZE];
+	/* protected by hard_start_xmit serialization */
 	u8 txretrycount;
-
-	/** Tx-related variables (for single packet tx) */
 	struct sk_buff *currenttxskb;
 
-	/** NIC Operation characteristics */
+	/* Locks */
+	struct mutex lock;
+	spinlock_t driver_lock;
+
+	/* NIC/link operation characteristics */
 	u16 mac_control;
-	u32 connect_status;
-	u32 mesh_connect_status;
-	u16 regioncode;
+	u8 radio_on;
+	u8 channel;
 	s16 txpower_cur;
 	s16 txpower_min;
 	s16 txpower_max;
 
-	/** POWER MANAGEMENT AND PnP SUPPORT */
-	u8 surpriseremoved;
-
-	u16 psmode;		/* Wlan802_11PowermodeCAM=disable
-				   Wlan802_11PowermodeMAX_PSP=enable */
-	u32 psstate;
-	u8 needtowakeup;
+	/** Scanning */
+	struct delayed_work scan_work;
+	/* remember which channel was scanned last, != 0 if currently scanning */
+	int scan_channel;
+	u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 scan_ssid_len;
 
+	/* Associating */
+	struct delayed_work assoc_work;
+	struct current_bss_params curbssparams;
+	u8 mode;
+	struct list_head network_list;
+	struct list_head network_free_list;
+	struct bss_descriptor *networks;
 	struct assoc_request * pending_assoc_req;
 	struct assoc_request * in_progress_assoc_req;
+	u16 capability;
+	uint16_t enablehwauto;
+	uint16_t ratebitmap;
 
-	/** Encryption parameter */
-	struct lbs_802_11_security secinfo;
-
-	/** WEP keys */
-	struct enc_key wep_keys[4];
-	u16 wep_tx_keyidx;
-
-	/** WPA keys */
-	struct enc_key wpa_mcast_key;
-	struct enc_key wpa_unicast_key;
-
-/*
- * In theory, the IE is limited to the IE length, 255,
- * but in practice 64 bytes are enough.
- */
-#define MAX_WPA_IE_LEN 64
+	/* ADHOC */
+	u16 beacon_period;
+	u8 beacon_enable;
+	u8 adhoccreate;
 
-	/** WPA Information Elements*/
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	u8 wpa_ie_len;
+	/* WEXT */
+	char name[DEV_NAME_LEN];
+	u8 nodename[16];
+	struct iw_statistics wstats;
+	u8 cur_rate;
+#define	MAX_REGION_CHANNEL_NUM	2
+	struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
 
 	/** Requested Signal Strength*/
 	u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
@@ -294,116 +201,8 @@ struct lbs_private {
 	u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
 	u16 nextSNRNF;
 	u16 numSNRNF;
-
-	u8 radio_on;
-
-	/** data rate stuff */
-	u8 cur_rate;
-
-	/** RF calibration data */
-
-#define	MAX_REGION_CHANNEL_NUM	2
-	/** region channel data */
-	struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
-
-	struct region_channel universal_channel[MAX_REGION_CHANNEL_NUM];
-
-	/** 11D and Domain Regulatory Data */
-	struct lbs_802_11d_domain_reg domainreg;
-	struct parsed_region_chan_11d parsed_region_chan;
-
-	/** FSM variable for 11d support */
-	u32 enable11d;
-
-	/**	MISCELLANEOUS */
-	struct lbs_offset_value offsetvalue;
-
-	u32 monitormode;
-	u8 fw_ready;
 };
 
 extern struct cmd_confirm_sleep confirm_sleep;
 
-/**
- *  @brief Structure used to store information for each beacon/probe response
- */
-struct bss_descriptor {
-	u8 bssid[ETH_ALEN];
-
-	u8 ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 ssid_len;
-
-	u16 capability;
-	u32 rssi;
-	u32 channel;
-	u16 beaconperiod;
-	__le16 atimwindow;
-
-	/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
-	u8 mode;
-
-	/* zero-terminated array of supported data rates */
-	u8 rates[MAX_RATES + 1];
-
-	unsigned long last_scanned;
-
-	union ieee_phy_param_set phy;
-	union ieee_ss_param_set ss;
-
-	struct ieee_ie_country_info_full_set countryinfo;
-
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	size_t wpa_ie_len;
-	u8 rsn_ie[MAX_WPA_IE_LEN];
-	size_t rsn_ie_len;
-
-	u8 mesh;
-
-	struct list_head list;
-};
-
-/** Association request
- *
- * Encapsulates all the options that describe a specific assocation request
- * or configuration of the wireless card's radio, mode, and security settings.
- */
-struct assoc_request {
-#define ASSOC_FLAG_SSID			1
-#define ASSOC_FLAG_CHANNEL		2
-#define ASSOC_FLAG_BAND			3
-#define ASSOC_FLAG_MODE			4
-#define ASSOC_FLAG_BSSID		5
-#define ASSOC_FLAG_WEP_KEYS		6
-#define ASSOC_FLAG_WEP_TX_KEYIDX	7
-#define ASSOC_FLAG_WPA_MCAST_KEY	8
-#define ASSOC_FLAG_WPA_UCAST_KEY	9
-#define ASSOC_FLAG_SECINFO		10
-#define ASSOC_FLAG_WPA_IE		11
-	unsigned long flags;
-
-	u8 ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 ssid_len;
-	u8 channel;
-	u8 band;
-	u8 mode;
-	u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
-
-	/** WEP keys */
-	struct enc_key wep_keys[4];
-	u16 wep_tx_keyidx;
-
-	/** WPA keys */
-	struct enc_key wpa_mcast_key;
-	struct enc_key wpa_unicast_key;
-
-	struct lbs_802_11_security secinfo;
-
-	/** WPA Information Elements*/
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	u8 wpa_ie_len;
-
-	/* BSS to associate with for infrastructure of Ad-Hoc join */
-	struct bss_descriptor bss;
-};
-
 #endif
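The reorganized struct lbs_private also groups the new deep-sleep state (is_deep_sleep, wakeup_dev_required, ds_awake_q, auto_deepsleep_timer) next to the three per-interface hooks that cmd.c calls through. Their one-time setup lives in main.c and the bus drivers, which are not part of this excerpt; under that assumption, it would look roughly like the sketch below, with hypothetical names for the bus-side handlers:

	/* in lbs_add_card() or equivalent (assumed, not shown in this diff) */
	init_waitqueue_head(&priv->ds_awake_q);
	priv->is_deep_sleep = 0;
	priv->wakeup_dev_required = 0;

	/* in the interface driver's probe, e.g. an SDIO back end */
	priv->enter_deep_sleep = if_example_enter_deep_sleep;
	priv->exit_deep_sleep = if_example_exit_deep_sleep;
	priv->reset_deep_sleep_wakeup = if_example_reset_deep_sleep_wakeup;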
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 53d56ab83c03..63d020374c2b 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -8,17 +8,8 @@
 #include "dev.h"
 #include "wext.h"
 #include "cmd.h"
+#include "mesh.h"
 
-static const char * mesh_stat_strings[]= {
-			"drop_duplicate_bcast",
-			"drop_ttl_zero",
-			"drop_no_fwd_route",
-			"drop_no_buffers",
-			"fwded_unicast_cnt",
-			"fwded_bcast_cnt",
-			"drop_blind_table",
-			"tx_failed_cnt"
-};
 
 static void lbs_ethtool_get_drvinfo(struct net_device *dev,
 					 struct ethtool_drvinfo *info)
@@ -73,73 +64,6 @@ out:
         return ret;
 }
 
-static void lbs_ethtool_get_stats(struct net_device *dev,
-				  struct ethtool_stats *stats, uint64_t *data)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	struct cmd_ds_mesh_access mesh_access;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_ETHTOOL);
-
-	/* Get Mesh Statistics */
-	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
-
-	if (ret) {
-		memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t)));
-		return;
-	}
-
-	priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]);
-	priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]);
-	priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]);
-	priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]);
-	priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]);
-	priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]);
-	priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]);
-	priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]);
-
-	data[0] = priv->mstats.fwd_drop_rbt;
-	data[1] = priv->mstats.fwd_drop_ttl;
-	data[2] = priv->mstats.fwd_drop_noroute;
-	data[3] = priv->mstats.fwd_drop_nobuf;
-	data[4] = priv->mstats.fwd_unicast_cnt;
-	data[5] = priv->mstats.fwd_bcast_cnt;
-	data[6] = priv->mstats.drop_blind;
-	data[7] = priv->mstats.tx_failed_cnt;
-
-	lbs_deb_enter(LBS_DEB_ETHTOOL);
-}
-
-static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
-		return MESH_STATS_NUM;
-
-	return -EOPNOTSUPP;
-}
-
-static void lbs_ethtool_get_strings(struct net_device *dev,
-				    uint32_t stringset, uint8_t *s)
-{
-	int i;
-
-	lbs_deb_enter(LBS_DEB_ETHTOOL);
-
-	switch (stringset) {
-        case ETH_SS_STATS:
-		for (i=0; i < MESH_STATS_NUM; i++) {
-			memcpy(s + i * ETH_GSTRING_LEN,
-					mesh_stat_strings[i],
-					ETH_GSTRING_LEN);
-		}
-		break;
-        }
-	lbs_deb_enter(LBS_DEB_ETHTOOL);
-}
-
 static void lbs_ethtool_get_wol(struct net_device *dev,
 				struct ethtool_wolinfo *wol)
 {
@@ -190,9 +114,9 @@ const struct ethtool_ops lbs_ethtool_ops = {
 	.get_drvinfo = lbs_ethtool_get_drvinfo,
 	.get_eeprom =  lbs_ethtool_get_eeprom,
 	.get_eeprom_len = lbs_ethtool_get_eeprom_len,
-	.get_sset_count = lbs_ethtool_get_sset_count,
-	.get_ethtool_stats = lbs_ethtool_get_stats,
-	.get_strings = lbs_ethtool_get_strings,
+	.get_sset_count = lbs_mesh_ethtool_get_sset_count,
+	.get_ethtool_stats = lbs_mesh_ethtool_get_stats,
+	.get_strings = lbs_mesh_ethtool_get_strings,
 	.get_wol = lbs_ethtool_get_wol,
 	.set_wol = lbs_ethtool_set_wol,
 };
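The mesh statistics handlers deleted above are not dropped: the new .get_sset_count/.get_ethtool_stats/.get_strings entries point at lbs_mesh_ethtool_* helpers, so the code has presumably moved to mesh.c (added elsewhere in this series) more or less verbatim. For reference, the sset-count helper would then still read as follows (a sketch based on the deleted function above, not code from this diff):

int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct lbs_private *priv = dev->ml_priv;

	/* mesh statistics only make sense on the virtual mesh device */
	if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
		return MESH_STATS_NUM;

	return -EOPNOTSUPP;
}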
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index fe8f0cb737bc..3809c0b49464 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -1,201 +1,190 @@
 /**
-  * This file contains definitions of WLAN commands.
+  * This file contains function prototypes, data structures
+  * and definitions for all the host/station commands
   */
 
 #ifndef _LBS_HOST_H_
 #define _LBS_HOST_H_
 
-/** PUBLIC DEFINITIONS */
-#define DEFAULT_AD_HOC_CHANNEL			6
-#define	DEFAULT_AD_HOC_CHANNEL_A		36
+#include "types.h"
+#include "defs.h"
 
-#define CMD_OPTION_WAITFORRSP			0x0002
+#define DEFAULT_AD_HOC_CHANNEL                  6
+
+#define CMD_OPTION_WAITFORRSP                   0x0002
 
 /** Host command IDs */
 
 /* Return command are almost always the same as the host command, but with
  * bit 15 set high.  There are a few exceptions, though...
  */
-#define CMD_RET(cmd)			(0x8000 | cmd)
+#define CMD_RET(cmd)                            (0x8000 | cmd)
 
 /* Return command convention exceptions: */
-#define CMD_RET_802_11_ASSOCIATE		0x8012
+#define CMD_RET_802_11_ASSOCIATE                0x8012
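
As a quick illustration of the response-code convention described above (bit 15 set on the host command ID, with ASSOCIATE as the documented exception), here is a standalone C sketch; the EX_-prefixed names are illustrative stand-ins, not driver symbols.

#include <assert.h>

#define EX_CMD_RET(cmd)			(0x8000 | (cmd))
#define EX_CMD_GET_HW_SPEC		0x0003
#define EX_CMD_802_11_ASSOCIATE		0x0050
#define EX_CMD_RET_802_11_ASSOCIATE	0x8012	/* exception: not 0x8050 */

int main(void)
{
	/* the generic rule: response ID = command ID with bit 15 set */
	assert(EX_CMD_RET(EX_CMD_GET_HW_SPEC) == 0x8003);

	/* the ASSOCIATE response does not follow the generic rule */
	assert(EX_CMD_RET(EX_CMD_802_11_ASSOCIATE) != EX_CMD_RET_802_11_ASSOCIATE);
	return 0;
}
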
 
 /* Command codes */
-#define CMD_GET_HW_SPEC				0x0003
-#define	CMD_EEPROM_UPDATE			0x0004
-#define CMD_802_11_RESET			0x0005
-#define	CMD_802_11_SCAN				0x0006
-#define CMD_802_11_GET_LOG			0x000b
-#define CMD_MAC_MULTICAST_ADR			0x0010
-#define CMD_802_11_AUTHENTICATE			0x0011
-#define CMD_802_11_EEPROM_ACCESS		0x0059
-#define CMD_802_11_ASSOCIATE			0x0050
-#define CMD_802_11_SET_WEP			0x0013
-#define CMD_802_11_GET_STAT			0x0014
-#define CMD_802_3_GET_STAT			0x0015
-#define CMD_802_11_SNMP_MIB			0x0016
-#define CMD_MAC_REG_MAP				0x0017
-#define CMD_BBP_REG_MAP				0x0018
-#define CMD_MAC_REG_ACCESS			0x0019
-#define CMD_BBP_REG_ACCESS			0x001a
-#define CMD_RF_REG_ACCESS			0x001b
-#define CMD_802_11_RADIO_CONTROL		0x001c
-#define CMD_802_11_RF_CHANNEL			0x001d
-#define CMD_802_11_RF_TX_POWER			0x001e
-#define CMD_802_11_RSSI				0x001f
-#define CMD_802_11_RF_ANTENNA			0x0020
-#define CMD_802_11_PS_MODE			0x0021
-#define CMD_802_11_DATA_RATE			0x0022
-#define CMD_RF_REG_MAP				0x0023
-#define CMD_802_11_DEAUTHENTICATE		0x0024
-#define CMD_802_11_REASSOCIATE			0x0025
-#define CMD_MAC_CONTROL				0x0028
-#define CMD_802_11_AD_HOC_START			0x002b
-#define CMD_802_11_AD_HOC_JOIN			0x002c
-#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS	0x002e
-#define CMD_802_11_ENABLE_RSN			0x002f
-#define CMD_802_11_SET_AFC			0x003c
-#define CMD_802_11_GET_AFC			0x003d
-#define CMD_802_11_AD_HOC_STOP			0x0040
-#define CMD_802_11_HOST_SLEEP_CFG		0x0043
-#define CMD_802_11_WAKEUP_CONFIRM		0x0044
-#define CMD_802_11_HOST_SLEEP_ACTIVATE		0x0045
-#define CMD_802_11_BEACON_STOP			0x0049
-#define CMD_802_11_MAC_ADDRESS			0x004d
-#define CMD_802_11_LED_GPIO_CTRL		0x004e
-#define CMD_802_11_EEPROM_ACCESS		0x0059
-#define CMD_802_11_BAND_CONFIG			0x0058
-#define CMD_GSPI_BUS_CONFIG			0x005a
-#define CMD_802_11D_DOMAIN_INFO			0x005b
-#define CMD_802_11_KEY_MATERIAL			0x005e
-#define CMD_802_11_SLEEP_PARAMS			0x0066
-#define CMD_802_11_INACTIVITY_TIMEOUT		0x0067
-#define CMD_802_11_SLEEP_PERIOD			0x0068
-#define CMD_802_11_TPC_CFG			0x0072
-#define CMD_802_11_PA_CFG			0x0073
-#define CMD_802_11_FW_WAKE_METHOD		0x0074
-#define CMD_802_11_SUBSCRIBE_EVENT		0x0075
-#define CMD_802_11_RATE_ADAPT_RATESET		0x0076
-#define CMD_802_11_TX_RATE_QUERY		0x007f
-#define	CMD_GET_TSF				0x0080
-#define CMD_BT_ACCESS				0x0087
-#define CMD_FWT_ACCESS				0x0095
-#define CMD_802_11_MONITOR_MODE			0x0098
-#define CMD_MESH_ACCESS				0x009b
-#define CMD_MESH_CONFIG_OLD			0x00a3
-#define CMD_MESH_CONFIG				0x00ac
-#define	CMD_SET_BOOT2_VER			0x00a5
-#define	CMD_FUNC_INIT				0x00a9
-#define	CMD_FUNC_SHUTDOWN			0x00aa
-#define CMD_802_11_BEACON_CTRL			0x00b0
+#define CMD_GET_HW_SPEC                         0x0003
+#define CMD_EEPROM_UPDATE                       0x0004
+#define CMD_802_11_RESET                        0x0005
+#define CMD_802_11_SCAN                         0x0006
+#define CMD_802_11_GET_LOG                      0x000b
+#define CMD_MAC_MULTICAST_ADR                   0x0010
+#define CMD_802_11_AUTHENTICATE                 0x0011
+#define CMD_802_11_EEPROM_ACCESS                0x0059
+#define CMD_802_11_ASSOCIATE                    0x0050
+#define CMD_802_11_SET_WEP                      0x0013
+#define CMD_802_11_GET_STAT                     0x0014
+#define CMD_802_3_GET_STAT                      0x0015
+#define CMD_802_11_SNMP_MIB                     0x0016
+#define CMD_MAC_REG_MAP                         0x0017
+#define CMD_BBP_REG_MAP                         0x0018
+#define CMD_MAC_REG_ACCESS                      0x0019
+#define CMD_BBP_REG_ACCESS                      0x001a
+#define CMD_RF_REG_ACCESS                       0x001b
+#define CMD_802_11_RADIO_CONTROL                0x001c
+#define CMD_802_11_RF_CHANNEL                   0x001d
+#define CMD_802_11_RF_TX_POWER                  0x001e
+#define CMD_802_11_RSSI                         0x001f
+#define CMD_802_11_RF_ANTENNA                   0x0020
+#define CMD_802_11_PS_MODE                      0x0021
+#define CMD_802_11_DATA_RATE                    0x0022
+#define CMD_RF_REG_MAP                          0x0023
+#define CMD_802_11_DEAUTHENTICATE               0x0024
+#define CMD_802_11_REASSOCIATE                  0x0025
+#define CMD_MAC_CONTROL                         0x0028
+#define CMD_802_11_AD_HOC_START                 0x002b
+#define CMD_802_11_AD_HOC_JOIN                  0x002c
+#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS       0x002e
+#define CMD_802_11_ENABLE_RSN                   0x002f
+#define CMD_802_11_SET_AFC                      0x003c
+#define CMD_802_11_GET_AFC                      0x003d
+#define CMD_802_11_DEEP_SLEEP                   0x003e
+#define CMD_802_11_AD_HOC_STOP                  0x0040
+#define CMD_802_11_HOST_SLEEP_CFG               0x0043
+#define CMD_802_11_WAKEUP_CONFIRM               0x0044
+#define CMD_802_11_HOST_SLEEP_ACTIVATE          0x0045
+#define CMD_802_11_BEACON_STOP                  0x0049
+#define CMD_802_11_MAC_ADDRESS                  0x004d
+#define CMD_802_11_LED_GPIO_CTRL                0x004e
+#define CMD_802_11_EEPROM_ACCESS                0x0059
+#define CMD_802_11_BAND_CONFIG                  0x0058
+#define CMD_GSPI_BUS_CONFIG                     0x005a
+#define CMD_802_11D_DOMAIN_INFO                 0x005b
+#define CMD_802_11_KEY_MATERIAL                 0x005e
+#define CMD_802_11_SLEEP_PARAMS                 0x0066
+#define CMD_802_11_INACTIVITY_TIMEOUT           0x0067
+#define CMD_802_11_SLEEP_PERIOD                 0x0068
+#define CMD_802_11_TPC_CFG                      0x0072
+#define CMD_802_11_PA_CFG                       0x0073
+#define CMD_802_11_FW_WAKE_METHOD               0x0074
+#define CMD_802_11_SUBSCRIBE_EVENT              0x0075
+#define CMD_802_11_RATE_ADAPT_RATESET           0x0076
+#define CMD_802_11_TX_RATE_QUERY                0x007f
+#define CMD_GET_TSF                             0x0080
+#define CMD_BT_ACCESS                           0x0087
+#define CMD_FWT_ACCESS                          0x0095
+#define CMD_802_11_MONITOR_MODE                 0x0098
+#define CMD_MESH_ACCESS                         0x009b
+#define CMD_MESH_CONFIG_OLD                     0x00a3
+#define CMD_MESH_CONFIG                         0x00ac
+#define CMD_SET_BOOT2_VER                       0x00a5
+#define CMD_FUNC_INIT                           0x00a9
+#define CMD_FUNC_SHUTDOWN                       0x00aa
+#define CMD_802_11_BEACON_CTRL                  0x00b0
 
 /* For the IEEE Power Save */
-#define CMD_SUBCMD_ENTER_PS		0x0030
-#define CMD_SUBCMD_EXIT_PS		0x0031
-#define CMD_SUBCMD_SLEEP_CONFIRMED	0x0034
-#define CMD_SUBCMD_FULL_POWERDOWN	0x0035
-#define CMD_SUBCMD_FULL_POWERUP		0x0036
+#define CMD_SUBCMD_ENTER_PS                     0x0030
+#define CMD_SUBCMD_EXIT_PS                      0x0031
+#define CMD_SUBCMD_SLEEP_CONFIRMED              0x0034
+#define CMD_SUBCMD_FULL_POWERDOWN               0x0035
+#define CMD_SUBCMD_FULL_POWERUP                 0x0036
 
-#define CMD_ENABLE_RSN			0x0001
-#define CMD_DISABLE_RSN			0x0000
+#define CMD_ENABLE_RSN                          0x0001
+#define CMD_DISABLE_RSN                         0x0000
 
-#define CMD_ACT_GET			0x0000
-#define CMD_ACT_SET			0x0001
-#define CMD_ACT_GET_AES			0x0002
-#define CMD_ACT_SET_AES			0x0003
-#define CMD_ACT_REMOVE_AES		0x0004
+#define CMD_ACT_GET                             0x0000
+#define CMD_ACT_SET                             0x0001
 
 /* Define action or option for CMD_802_11_SET_WEP */
-#define CMD_ACT_ADD			0x0002
-#define CMD_ACT_REMOVE			0x0004
-#define CMD_ACT_USE_DEFAULT		0x0008
-
-#define CMD_TYPE_WEP_40_BIT		0x01
-#define CMD_TYPE_WEP_104_BIT		0x02
+#define CMD_ACT_ADD                             0x0002
+#define CMD_ACT_REMOVE                          0x0004
 
-#define CMD_NUM_OF_WEP_KEYS		4
+#define CMD_TYPE_WEP_40_BIT                     0x01
+#define CMD_TYPE_WEP_104_BIT                    0x02
 
-#define CMD_WEP_KEY_INDEX_MASK		0x3fff
+#define CMD_NUM_OF_WEP_KEYS                     4
 
-/* Define action or option for CMD_802_11_RESET */
-#define CMD_ACT_HALT			0x0003
+#define CMD_WEP_KEY_INDEX_MASK                  0x3fff
 
 /* Define action or option for CMD_802_11_SCAN */
-#define CMD_BSS_TYPE_BSS		0x0001
-#define CMD_BSS_TYPE_IBSS		0x0002
-#define CMD_BSS_TYPE_ANY		0x0003
+#define CMD_BSS_TYPE_BSS                        0x0001
+#define CMD_BSS_TYPE_IBSS                       0x0002
+#define CMD_BSS_TYPE_ANY                        0x0003
 
 /* Define action or option for CMD_802_11_SCAN */
-#define CMD_SCAN_TYPE_ACTIVE		0x0000
-#define CMD_SCAN_TYPE_PASSIVE		0x0001
+#define CMD_SCAN_TYPE_ACTIVE                    0x0000
+#define CMD_SCAN_TYPE_PASSIVE                   0x0001
 
-#define CMD_SCAN_RADIO_TYPE_BG		0
+#define CMD_SCAN_RADIO_TYPE_BG                  0
 
-#define	CMD_SCAN_PROBE_DELAY_TIME	0
+#define CMD_SCAN_PROBE_DELAY_TIME               0
 
 /* Define action or option for CMD_MAC_CONTROL */
-#define CMD_ACT_MAC_RX_ON			0x0001
-#define CMD_ACT_MAC_TX_ON			0x0002
-#define CMD_ACT_MAC_LOOPBACK_ON			0x0004
-#define CMD_ACT_MAC_WEP_ENABLE			0x0008
-#define CMD_ACT_MAC_INT_ENABLE			0x0010
-#define CMD_ACT_MAC_MULTICAST_ENABLE		0x0020
-#define CMD_ACT_MAC_BROADCAST_ENABLE		0x0040
-#define CMD_ACT_MAC_PROMISCUOUS_ENABLE		0x0080
-#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE	0x0100
-#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE	0x0400
+#define CMD_ACT_MAC_RX_ON                       0x0001
+#define CMD_ACT_MAC_TX_ON                       0x0002
+#define CMD_ACT_MAC_LOOPBACK_ON                 0x0004
+#define CMD_ACT_MAC_WEP_ENABLE                  0x0008
+#define CMD_ACT_MAC_INT_ENABLE                  0x0010
+#define CMD_ACT_MAC_MULTICAST_ENABLE            0x0020
+#define CMD_ACT_MAC_BROADCAST_ENABLE            0x0040
+#define CMD_ACT_MAC_PROMISCUOUS_ENABLE          0x0080
+#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE        0x0100
+#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE    0x0400
 
 /* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
-#define CMD_SUBSCRIBE_RSSI_LOW		0x0001
-#define CMD_SUBSCRIBE_SNR_LOW		0x0002
-#define CMD_SUBSCRIBE_FAILCOUNT		0x0004
-#define CMD_SUBSCRIBE_BCNMISS		0x0008
-#define CMD_SUBSCRIBE_RSSI_HIGH		0x0010
-#define CMD_SUBSCRIBE_SNR_HIGH		0x0020
+#define CMD_SUBSCRIBE_RSSI_LOW                  0x0001
+#define CMD_SUBSCRIBE_SNR_LOW                   0x0002
+#define CMD_SUBSCRIBE_FAILCOUNT                 0x0004
+#define CMD_SUBSCRIBE_BCNMISS                   0x0008
+#define CMD_SUBSCRIBE_RSSI_HIGH                 0x0010
+#define CMD_SUBSCRIBE_SNR_HIGH                  0x0020
 
-#define RADIO_PREAMBLE_LONG	0x00
-#define RADIO_PREAMBLE_SHORT	0x02
-#define RADIO_PREAMBLE_AUTO	0x04
+#define RADIO_PREAMBLE_LONG                     0x00
+#define RADIO_PREAMBLE_SHORT                    0x02
+#define RADIO_PREAMBLE_AUTO                     0x04
 
 /* Define action or option for CMD_802_11_RF_CHANNEL */
-#define CMD_OPT_802_11_RF_CHANNEL_GET	0x00
-#define CMD_OPT_802_11_RF_CHANNEL_SET	0x01
+#define CMD_OPT_802_11_RF_CHANNEL_GET           0x00
+#define CMD_OPT_802_11_RF_CHANNEL_SET           0x01
 
 /* Define action or option for CMD_802_11_DATA_RATE */
-#define CMD_ACT_SET_TX_AUTO		0x0000
-#define CMD_ACT_SET_TX_FIX_RATE		0x0001
-#define CMD_ACT_GET_TX_RATE		0x0002
-
-#define CMD_ACT_SET_RX			0x0001
-#define	CMD_ACT_SET_TX			0x0002
-#define CMD_ACT_SET_BOTH		0x0003
-#define	CMD_ACT_GET_RX			0x0004
-#define CMD_ACT_GET_TX			0x0008
-#define	CMD_ACT_GET_BOTH		0x000c
+#define CMD_ACT_SET_TX_AUTO                     0x0000
+#define CMD_ACT_SET_TX_FIX_RATE                 0x0001
+#define CMD_ACT_GET_TX_RATE                     0x0002
 
 /* Define action or option for CMD_802_11_PS_MODE */
-#define CMD_TYPE_CAM			0x0000
-#define	CMD_TYPE_MAX_PSP		0x0001
-#define CMD_TYPE_FAST_PSP		0x0002
+#define CMD_TYPE_CAM                            0x0000
+#define CMD_TYPE_MAX_PSP                        0x0001
+#define CMD_TYPE_FAST_PSP                       0x0002
 
 /* Options for CMD_802_11_FW_WAKE_METHOD */
-#define CMD_WAKE_METHOD_UNCHANGED	0x0000
-#define CMD_WAKE_METHOD_COMMAND_INT	0x0001
-#define CMD_WAKE_METHOD_GPIO		0x0002
+#define CMD_WAKE_METHOD_UNCHANGED               0x0000
+#define CMD_WAKE_METHOD_COMMAND_INT             0x0001
+#define CMD_WAKE_METHOD_GPIO                    0x0002
 
 /* Object IDs for CMD_802_11_SNMP_MIB */
-#define SNMP_MIB_OID_BSS_TYPE		0x0000
-#define SNMP_MIB_OID_OP_RATE_SET	0x0001
-#define SNMP_MIB_OID_BEACON_PERIOD	0x0002  /* Reserved on v9+ */
-#define SNMP_MIB_OID_DTIM_PERIOD	0x0003  /* Reserved on v9+ */
-#define SNMP_MIB_OID_ASSOC_TIMEOUT	0x0004  /* Reserved on v9+ */
-#define SNMP_MIB_OID_RTS_THRESHOLD	0x0005
-#define SNMP_MIB_OID_SHORT_RETRY_LIMIT	0x0006
-#define SNMP_MIB_OID_LONG_RETRY_LIMIT	0x0007
-#define SNMP_MIB_OID_FRAG_THRESHOLD	0x0008
-#define SNMP_MIB_OID_11D_ENABLE		0x0009
-#define SNMP_MIB_OID_11H_ENABLE		0x000A
+#define SNMP_MIB_OID_BSS_TYPE                   0x0000
+#define SNMP_MIB_OID_OP_RATE_SET                0x0001
+#define SNMP_MIB_OID_BEACON_PERIOD              0x0002  /* Reserved on v9+ */
+#define SNMP_MIB_OID_DTIM_PERIOD                0x0003  /* Reserved on v9+ */
+#define SNMP_MIB_OID_ASSOC_TIMEOUT              0x0004  /* Reserved on v9+ */
+#define SNMP_MIB_OID_RTS_THRESHOLD              0x0005
+#define SNMP_MIB_OID_SHORT_RETRY_LIMIT          0x0006
+#define SNMP_MIB_OID_LONG_RETRY_LIMIT           0x0007
+#define SNMP_MIB_OID_FRAG_THRESHOLD             0x0008
+#define SNMP_MIB_OID_11D_ENABLE                 0x0009
+#define SNMP_MIB_OID_11H_ENABLE                 0x000A
 
 /* Define action or option for CMD_BT_ACCESS */
 enum cmd_bt_access_opts {
@@ -302,4 +291,672 @@ enum cmd_mesh_config_types {
 #define MACREG_INT_CODE_MESH_AUTO_STARTED	35
 #define MACREG_INT_CODE_FIRMWARE_READY		48
 
+
+/* 802.11-related definitions */
+
+/* TxPD descriptor */
+struct txpd {
+	/* union to cope with later FW revisions */
+	union {
+		/* Current Tx packet status */
+		__le32 tx_status;
+		struct {
+			/* BSS type: client, AP, etc. */
+			u8 bss_type;
+			/* BSS number */
+			u8 bss_num;
+			/* Reserved */
+			__le16 reserved;
+		} bss;
+	} u;
+	/* Tx control */
+	__le32 tx_control;
+	__le32 tx_packet_location;
+	/* Tx packet length */
+	__le16 tx_packet_length;
+	/* First 2 byte of destination MAC address */
+	u8 tx_dest_addr_high[2];
+	/* Last 4 byte of destination MAC address */
+	u8 tx_dest_addr_low[4];
+	/* Pkt Priority */
+	u8 priority;
+	/* Pkt Transmit Power control */
+	u8 powermgmt;
+	/* Amount of time the packet has been queued (units = 2ms) */
+	u8 pktdelay_2ms;
+	/* reserved */
+	u8 reserved1;
+} __attribute__ ((packed));
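
To make the "cope with later FW revisions" union above concrete, here is a minimal standalone C sketch of just the descriptor head, using mimic types rather than the driver's own, showing that the 32-bit status word and the per-BSS byte fields overlay the same four bytes.

#include <stdint.h>
#include <stdio.h>

/* mimic of the TxPD/RxPD head only; not the driver's real type */
struct ex_txpd_head {
	union {
		uint32_t tx_status;		/* older firmware view */
		struct {
			uint8_t  bss_type;	/* newer firmware view */
			uint8_t  bss_num;
			uint16_t reserved;
		} __attribute__((packed)) bss;
	} u;
} __attribute__((packed));

int main(void)
{
	/* both views of the union occupy the same four bytes */
	_Static_assert(sizeof(struct ex_txpd_head) == 4,
		       "descriptor head must stay 4 bytes");

	struct ex_txpd_head h = { .u.tx_status = 0 };
	h.u.bss.bss_type = 2;
	h.u.bss.bss_num  = 1;
	printf("head size: %zu bytes\n", sizeof(h));
	return 0;
}
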
+
+/* RxPD Descriptor */
+struct rxpd {
+	/* union to cope with later FW revisions */
+	union {
+		/* Current Rx packet status */
+		__le16 status;
+		struct {
+			/* BSS type: client, AP, etc. */
+			u8 bss_type;
+			/* BSS number */
+			u8 bss_num;
+		} __attribute__ ((packed)) bss;
+	} __attribute__ ((packed)) u;
+
+	/* SNR */
+	u8 snr;
+
+	/* Rx control */
+	u8 rx_control;
+
+	/* Pkt length */
+	__le16 pkt_len;
+
+	/* Noise Floor */
+	u8 nf;
+
+	/* Rx Packet Rate */
+	u8 rx_rate;
+
+	/* Pkt addr */
+	__le32 pkt_ptr;
+
+	/* Next Rx RxPD addr */
+	__le32 next_rxpd_ptr;
+
+	/* Pkt Priority */
+	u8 priority;
+	u8 reserved[3];
+} __attribute__ ((packed));
+
+struct cmd_header {
+	__le16 command;
+	__le16 size;
+	__le16 seqnum;
+	__le16 result;
+} __attribute__ ((packed));
+
+/* Generic structure to hold all key types. */
+struct enc_key {
+	u16 len;
+	u16 flags;  /* KEY_INFO_* from defs.h */
+	u16 type; /* KEY_TYPE_* from defs.h */
+	u8 key[32];
+};
+
+/* lbs_offset_value */
+struct lbs_offset_value {
+	u32 offset;
+	u32 value;
+} __attribute__ ((packed));
+
+/*
+ * Define data structure for CMD_GET_HW_SPEC
+ * This structure defines the response for the GET_HW_SPEC command
+ */
+struct cmd_ds_get_hw_spec {
+	struct cmd_header hdr;
+
+	/* HW Interface version number */
+	__le16 hwifversion;
+	/* HW version number */
+	__le16 version;
+	/* Max number of TxPD FW can handle */
+	__le16 nr_txpd;
+	/* Max no of Multicast address */
+	__le16 nr_mcast_adr;
+	/* MAC address */
+	u8 permanentaddr[6];
+
+	/* region Code */
+	__le16 regioncode;
+
+	/* Number of antenna used */
+	__le16 nr_antenna;
+
+	/* FW release number, example 0x01030304 = 2.3.4p1 */
+	__le32 fwrelease;
+
+	/* Base Address of TxPD queue */
+	__le32 wcb_base;
+	/* Read Pointer of RxPd queue */
+	__le32 rxpd_rdptr;
+
+	/* Write Pointer of RxPd queue */
+	__le32 rxpd_wrptr;
+
+	/*FW/HW capability */
+	__le32 fwcapinfo;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_subscribe_event {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 events;
+
+	/* The TLV buffer of the CMD_802_11_SUBSCRIBE_EVENT command can
+	 * contain a number of TLVs. From the v5.1 manual, those TLVs would
+	 * add up to 40 bytes. However, future firmware might add additional
+	 * TLVs, so the buffer is sized with some headroom.
+	 */
+	uint8_t tlv[128];
+} __attribute__ ((packed));
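
The tlv[128] buffer above is filled with type-length-value records. The following standalone sketch shows generic TLV framing with 16-bit little-endian type and length fields; the TLV ID used is hypothetical, not a Marvell-defined one.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* generic little-endian TLV framing: 16-bit type, 16-bit length, payload */
static size_t ex_tlv_append(uint8_t *buf, size_t off,
			    uint16_t type, const void *val, uint16_t len)
{
	buf[off + 0] = (uint8_t)(type & 0xff);
	buf[off + 1] = (uint8_t)(type >> 8);
	buf[off + 2] = (uint8_t)(len & 0xff);
	buf[off + 3] = (uint8_t)(len >> 8);
	memcpy(buf + off + 4, val, len);
	return off + 4 + len;
}

int main(void)
{
	uint8_t tlv[128] = { 0 };
	uint8_t threshold[2] = { 20, 0 };	/* example payload, little-endian */
	size_t used = ex_tlv_append(tlv, 0, 0x0104 /* hypothetical TLV ID */,
				    threshold, sizeof(threshold));

	printf("%zu of the 128 tlv[] bytes used\n", used);
	return 0;
}
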
+
+/*
+ * This scan handles the Country Information IE (802.11d compliant)
+ * Define data structure for CMD_802_11_SCAN
+ */
+struct cmd_ds_802_11_scan {
+	struct cmd_header hdr;
+
+	uint8_t bsstype;
+	uint8_t bssid[ETH_ALEN];
+	uint8_t tlvbuffer[0];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_scan_rsp {
+	struct cmd_header hdr;
+
+	__le16 bssdescriptsize;
+	uint8_t nr_sets;
+	uint8_t bssdesc_and_tlvbuffer[0];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_get_log {
+	struct cmd_header hdr;
+
+	__le32 mcasttxframe;
+	__le32 failed;
+	__le32 retry;
+	__le32 multiretry;
+	__le32 framedup;
+	__le32 rtssuccess;
+	__le32 rtsfailure;
+	__le32 ackfailure;
+	__le32 rxfrag;
+	__le32 mcastrxframe;
+	__le32 fcserror;
+	__le32 txframe;
+	__le32 wepundecryptable;
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_control {
+	struct cmd_header hdr;
+	__le16 action;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_multicast_adr {
+	struct cmd_header hdr;
+	__le16 action;
+	__le16 nr_of_adrs;
+	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_authenticate {
+	struct cmd_header hdr;
+
+	u8 bssid[ETH_ALEN];
+	u8 authtype;
+	u8 reserved[10];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_deauthenticate {
+	struct cmd_header hdr;
+
+	u8 macaddr[ETH_ALEN];
+	__le16 reasoncode;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_associate {
+	struct cmd_header hdr;
+
+	u8 bssid[6];
+	__le16 capability;
+	__le16 listeninterval;
+	__le16 bcnperiod;
+	u8 dtimperiod;
+	u8 iebuf[512];    /* Enough for required and most optional IEs */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_associate_response {
+	struct cmd_header hdr;
+
+	__le16 capability;
+	__le16 statuscode;
+	__le16 aid;
+	u8 iebuf[512];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_set_wep {
+	struct cmd_header hdr;
+
+	/* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
+	__le16 action;
+
+	/* key Index selected for Tx */
+	__le16 keyindex;
+
+	/* 40, 128bit or TXWEP */
+	uint8_t keytype[4];
+	uint8_t keymaterial[4][16];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_snmp_mib {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 oid;
+	__le16 bufsize;
+	u8 value[128];
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_reg_access {
+	__le16 action;
+	__le16 offset;
+	__le32 value;
+} __attribute__ ((packed));
+
+struct cmd_ds_bbp_reg_access {
+	__le16 action;
+	__le16 offset;
+	u8 value;
+	u8 reserved[3];
+} __attribute__ ((packed));
+
+struct cmd_ds_rf_reg_access {
+	__le16 action;
+	__le16 offset;
+	u8 value;
+	u8 reserved[3];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_radio_control {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 control;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_beacon_control {
+	__le16 action;
+	__le16 beacon_enable;
+	__le16 beacon_period;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_sleep_params {
+	struct cmd_header hdr;
+
+	/* ACT_GET/ACT_SET */
+	__le16 action;
+
+	/* Sleep clock error in ppm */
+	__le16 error;
+
+	/* Wakeup offset in usec */
+	__le16 offset;
+
+	/* Clock stabilization time in usec */
+	__le16 stabletime;
+
+	/* control periodic calibration */
+	uint8_t calcontrol;
+
+	/* control the use of external sleep clock */
+	uint8_t externalsleepclk;
+
+	/* reserved field, should be set to zero */
+	__le16 reserved;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rf_channel {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 channel;
+	__le16 rftype;      /* unused */
+	__le16 reserved;    /* unused */
+	u8 channellist[32]; /* unused */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rssi {
+	/* weighting factor */
+	__le16 N;
+
+	__le16 reserved_0;
+	__le16 reserved_1;
+	__le16 reserved_2;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rssi_rsp {
+	__le16 SNR;
+	__le16 noisefloor;
+	__le16 avgSNR;
+	__le16 avgnoisefloor;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_mac_address {
+	struct cmd_header hdr;
+
+	__le16 action;
+	u8 macadd[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rf_tx_power {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 curlevel;
+	s8 maxlevel;
+	s8 minlevel;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_monitor_mode {
+	__le16 action;
+	__le16 mode;
+} __attribute__ ((packed));
+
+struct cmd_ds_set_boot2_ver {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 version;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_fw_wake_method {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 method;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ps_mode {
+	__le16 action;
+	__le16 nullpktinterval;
+	__le16 multipledtim;
+	__le16 reserved;
+	__le16 locallisteninterval;
+} __attribute__ ((packed));
+
+struct cmd_confirm_sleep {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 nullpktinterval;
+	__le16 multipledtim;
+	__le16 reserved;
+	__le16 locallisteninterval;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_data_rate {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 reserved;
+	u8 rates[MAX_RATES];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rate_adapt_rateset {
+	struct cmd_header hdr;
+	__le16 action;
+	__le16 enablehwauto;
+	__le16 bitmap;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_start {
+	struct cmd_header hdr;
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 bsstype;
+	__le16 beaconperiod;
+	u8 dtimperiod;   /* Reserved on v9 and later */
+	struct ieee_ie_ibss_param_set ibss;
+	u8 reserved1[4];
+	struct ieee_ie_ds_param_set ds;
+	u8 reserved2[4];
+	__le16 probedelay;  /* Reserved on v9 and later */
+	__le16 capability;
+	u8 rates[MAX_RATES];
+	u8 tlv_memory_size_pad[100];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_result {
+	struct cmd_header hdr;
+
+	u8 pad[3];
+	u8 bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct adhoc_bssdesc {
+	u8 bssid[ETH_ALEN];
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 type;
+	__le16 beaconperiod;
+	u8 dtimperiod;
+	__le64 timestamp;
+	__le64 localtime;
+	struct ieee_ie_ds_param_set ds;
+	u8 reserved1[4];
+	struct ieee_ie_ibss_param_set ibss;
+	u8 reserved2[4];
+	__le16 capability;
+	u8 rates[MAX_RATES];
+
+	/* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
+	 * Adhoc join command, and any change here would cause a binary
+	 * layout mismatch with the firmware.
+	 */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_join {
+	struct cmd_header hdr;
+
+	struct adhoc_bssdesc bss;
+	__le16 failtimeout;   /* Reserved on v9 and later */
+	__le16 probedelay;    /* Reserved on v9 and later */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_stop {
+	struct cmd_header hdr;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_enable_rsn {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 enable;
+} __attribute__ ((packed));
+
+struct MrvlIEtype_keyParamSet {
+	/* type ID */
+	__le16 type;
+
+	/* length of Payload */
+	__le16 length;
+
+	/* type of key: WEP=0, TKIP=1, AES=2 */
+	__le16 keytypeid;
+
+	/* key control Info specific to a keytypeid */
+	__le16 keyinfo;
+
+	/* length of key */
+	__le16 keylen;
+
+	/* key material of size keylen */
+	u8 key[32];
+} __attribute__ ((packed));
+
+#define MAX_WOL_RULES 		16
+
+struct host_wol_rule {
+	uint8_t rule_no;
+	uint8_t rule_ops;
+	__le16 sig_offset;
+	__le16 sig_length;
+	__le16 reserve;
+	__be32 sig_mask;
+	__be32 signature;
+} __attribute__ ((packed));
+
+struct wol_config {
+	uint8_t action;
+	uint8_t pattern;
+	uint8_t no_rules_in_cmd;
+	uint8_t result;
+	struct host_wol_rule rule[MAX_WOL_RULES];
+} __attribute__ ((packed));
+
+struct cmd_ds_host_sleep {
+	struct cmd_header hdr;
+	__le32 criteria;
+	uint8_t gpio;
+	uint16_t gap;
+	struct wol_config wol_conf;
+} __attribute__ ((packed));
+
+
+
+struct cmd_ds_802_11_key_material {
+	struct cmd_header hdr;
+
+	__le16 action;
+	struct MrvlIEtype_keyParamSet keyParamSet[2];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_eeprom_access {
+	struct cmd_header hdr;
+	__le16 action;
+	__le16 offset;
+	__le16 len;
+	/* firmware says it returns a maximum of 20 bytes */
+#define LBS_EEPROM_READ_LEN 20
+	u8 value[LBS_EEPROM_READ_LEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_tpc_cfg {
+	struct cmd_header hdr;
+
+	__le16 action;
+	uint8_t enable;
+	int8_t P0;
+	int8_t P1;
+	int8_t P2;
+	uint8_t usesnr;
+} __attribute__ ((packed));
+
+
+struct cmd_ds_802_11_pa_cfg {
+	struct cmd_header hdr;
+
+	__le16 action;
+	uint8_t enable;
+	int8_t P0;
+	int8_t P1;
+	int8_t P2;
+} __attribute__ ((packed));
+
+
+struct cmd_ds_802_11_led_ctrl {
+	__le16 action;
+	__le16 numled;
+	u8 data[256];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_afc {
+	__le16 afc_auto;
+	union {
+		struct {
+			__le16 threshold;
+			__le16 period;
+		};
+		struct {
+			__le16 timing_offset; /* signed */
+			__le16 carrier_offset; /* signed */
+		};
+	};
+} __attribute__ ((packed));
+
+struct cmd_tx_rate_query {
+	__le16 txrate;
+} __attribute__ ((packed));
+
+struct cmd_ds_get_tsf {
+	__le64 tsfvalue;
+} __attribute__ ((packed));
+
+struct cmd_ds_bt_access {
+	__le16 action;
+	__le32 id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_fwt_access {
+	__le16 action;
+	__le32 id;
+	u8 valid;
+	u8 da[ETH_ALEN];
+	u8 dir;
+	u8 ra[ETH_ALEN];
+	__le32 ssn;
+	__le32 dsn;
+	__le32 metric;
+	u8 rate;
+	u8 hopcount;
+	u8 ttl;
+	__le32 expiration;
+	u8 sleepmode;
+	__le32 snr;
+	__le32 references;
+	u8 prec[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_mesh_config {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 channel;
+	__le16 type;
+	__le16 length;
+	u8 data[128];	/* last position reserved */
+} __attribute__ ((packed));
+
+struct cmd_ds_mesh_access {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le32 data[32];	/* last position reserved */
+} __attribute__ ((packed));
+
+/* Number of stats counters returned by the firmware */
+#define MESH_STATS_NUM 8
+
+struct cmd_ds_command {
+	/* command header */
+	__le16 command;
+	__le16 size;
+	__le16 seqnum;
+	__le16 result;
+
+	/* command Body */
+	union {
+		struct cmd_ds_802_11_ps_mode psmode;
+		struct cmd_ds_802_11_monitor_mode monitor;
+		struct cmd_ds_802_11_rssi rssi;
+		struct cmd_ds_802_11_rssi_rsp rssirsp;
+		struct cmd_ds_mac_reg_access macreg;
+		struct cmd_ds_bbp_reg_access bbpreg;
+		struct cmd_ds_rf_reg_access rfreg;
+
+		struct cmd_ds_802_11_tpc_cfg tpccfg;
+		struct cmd_ds_802_11_afc afc;
+		struct cmd_ds_802_11_led_ctrl ledgpio;
+
+		struct cmd_ds_bt_access bt;
+		struct cmd_ds_fwt_access fwt;
+		struct cmd_ds_802_11_beacon_control bcn_ctrl;
+	} params;
+} __attribute__ ((packed));
+
 #endif
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
deleted file mode 100644
index c8a1998d4744..000000000000
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ /dev/null
@@ -1,800 +0,0 @@
-/*
- * This file contains the function prototypes, data structure
- * and defines for all the host/station commands
- */
-#ifndef _LBS_HOSTCMD_H
-#define _LBS_HOSTCMD_H
-
-#include <linux/wireless.h>
-#include "11d.h"
-#include "types.h"
-
-/* 802.11-related definitions */
-
-/* TxPD descriptor */
-struct txpd {
-	/* union to cope up with later FW revisions */
-	union {
-		/* Current Tx packet status */
-		__le32 tx_status;
-		struct {
-			/* BSS type: client, AP, etc. */
-			u8 bss_type;
-			/* BSS number */
-			u8 bss_num;
-			/* Reserved */
-			__le16 reserved;
-		} bss;
-	} u;
-	/* Tx control */
-	__le32 tx_control;
-	__le32 tx_packet_location;
-	/* Tx packet length */
-	__le16 tx_packet_length;
-	/* First 2 byte of destination MAC address */
-	u8 tx_dest_addr_high[2];
-	/* Last 4 byte of destination MAC address */
-	u8 tx_dest_addr_low[4];
-	/* Pkt Priority */
-	u8 priority;
-	/* Pkt Trasnit Power control */
-	u8 powermgmt;
-	/* Amount of time the packet has been queued in the driver (units = 2ms) */
-	u8 pktdelay_2ms;
-	/* reserved */
-	u8 reserved1;
-} __attribute__ ((packed));
-
-/* RxPD Descriptor */
-struct rxpd {
-	/* union to cope up with later FW revisions */
-	union {
-		/* Current Rx packet status */
-		__le16 status;
-		struct {
-			/* BSS type: client, AP, etc. */
-			u8 bss_type;
-			/* BSS number */
-			u8 bss_num;
-		} __attribute__ ((packed)) bss;
-	} __attribute__ ((packed)) u;
-
-	/* SNR */
-	u8 snr;
-
-	/* Tx control */
-	u8 rx_control;
-
-	/* Pkt length */
-	__le16 pkt_len;
-
-	/* Noise Floor */
-	u8 nf;
-
-	/* Rx Packet Rate */
-	u8 rx_rate;
-
-	/* Pkt addr */
-	__le32 pkt_ptr;
-
-	/* Next Rx RxPD addr */
-	__le32 next_rxpd_ptr;
-
-	/* Pkt Priority */
-	u8 priority;
-	u8 reserved[3];
-} __attribute__ ((packed));
-
-struct cmd_header {
-	__le16 command;
-	__le16 size;
-	__le16 seqnum;
-	__le16 result;
-} __attribute__ ((packed));
-
-struct cmd_ctrl_node {
-	struct list_head list;
-	int result;
-	/* command response */
-	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *);
-	unsigned long callback_arg;
-	/* command data */
-	struct cmd_header *cmdbuf;
-	/* wait queue */
-	u16 cmdwaitqwoken;
-	wait_queue_head_t cmdwait_q;
-};
-
-/* Generic structure to hold all key types. */
-struct enc_key {
-	u16 len;
-	u16 flags;  /* KEY_INFO_* from defs.h */
-	u16 type; /* KEY_TYPE_* from defs.h */
-	u8 key[32];
-};
-
-/* lbs_offset_value */
-struct lbs_offset_value {
-	u32 offset;
-	u32 value;
-} __attribute__ ((packed));
-
-/* Define general data structure */
-/* cmd_DS_GEN */
-struct cmd_ds_gen {
-	__le16 command;
-	__le16 size;
-	__le16 seqnum;
-	__le16 result;
-	void *cmdresp[0];
-} __attribute__ ((packed));
-
-#define S_DS_GEN sizeof(struct cmd_ds_gen)
-
-
-/*
- * Define data structure for CMD_GET_HW_SPEC
- * This structure defines the response for the GET_HW_SPEC command
- */
-struct cmd_ds_get_hw_spec {
-	struct cmd_header hdr;
-
-	/* HW Interface version number */
-	__le16 hwifversion;
-	/* HW version number */
-	__le16 version;
-	/* Max number of TxPD FW can handle */
-	__le16 nr_txpd;
-	/* Max no of Multicast address */
-	__le16 nr_mcast_adr;
-	/* MAC address */
-	u8 permanentaddr[6];
-
-	/* region Code */
-	__le16 regioncode;
-
-	/* Number of antenna used */
-	__le16 nr_antenna;
-
-	/* FW release number, example 0x01030304 = 2.3.4p1 */
-	__le32 fwrelease;
-
-	/* Base Address of TxPD queue */
-	__le32 wcb_base;
-	/* Read Pointer of RxPd queue */
-	__le32 rxpd_rdptr;
-
-	/* Write Pointer of RxPd queue */
-	__le32 rxpd_wrptr;
-
-	/*FW/HW capability */
-	__le32 fwcapinfo;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_subscribe_event {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 events;
-
-	/* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
-	 * number of TLVs. From the v5.1 manual, those TLVs would add up to
-	 * 40 bytes. However, future firmware might add additional TLVs, so I
-	 * bump this up a bit.
-	 */
-	uint8_t tlv[128];
-} __attribute__ ((packed));
-
-/*
- * This scan handle Country Information IE(802.11d compliant)
- * Define data structure for CMD_802_11_SCAN
- */
-struct cmd_ds_802_11_scan {
-	struct cmd_header hdr;
-
-	uint8_t bsstype;
-	uint8_t bssid[ETH_ALEN];
-	uint8_t tlvbuffer[0];
-#if 0
-	mrvlietypes_ssidparamset_t ssidParamSet;
-	mrvlietypes_chanlistparamset_t ChanListParamSet;
-	mrvlietypes_ratesparamset_t OpRateSet;
-#endif
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_scan_rsp {
-	struct cmd_header hdr;
-
-	__le16 bssdescriptsize;
-	uint8_t nr_sets;
-	uint8_t bssdesc_and_tlvbuffer[0];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_get_log {
-	struct cmd_header hdr;
-
-	__le32 mcasttxframe;
-	__le32 failed;
-	__le32 retry;
-	__le32 multiretry;
-	__le32 framedup;
-	__le32 rtssuccess;
-	__le32 rtsfailure;
-	__le32 ackfailure;
-	__le32 rxfrag;
-	__le32 mcastrxframe;
-	__le32 fcserror;
-	__le32 txframe;
-	__le32 wepundecryptable;
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_control {
-	struct cmd_header hdr;
-	__le16 action;
-	u16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_multicast_adr {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 nr_of_adrs;
-	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
-} __attribute__ ((packed));
-
-struct cmd_ds_gspi_bus_config {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 bus_delay_mode;
-	__le16 host_time_delay_to_read_port;
-	__le16 host_time_delay_to_read_register;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_authenticate {
-	struct cmd_header hdr;
-
-	u8 bssid[ETH_ALEN];
-	u8 authtype;
-	u8 reserved[10];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_deauthenticate {
-	struct cmd_header hdr;
-
-	u8 macaddr[ETH_ALEN];
-	__le16 reasoncode;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_associate {
-	struct cmd_header hdr;
-
-	u8 bssid[6];
-	__le16 capability;
-	__le16 listeninterval;
-	__le16 bcnperiod;
-	u8 dtimperiod;
-	u8 iebuf[512];    /* Enough for required and most optional IEs */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_associate_response {
-	struct cmd_header hdr;
-
-	__le16 capability;
-	__le16 statuscode;
-	__le16 aid;
-	u8 iebuf[512];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_set_wep {
-	struct cmd_header hdr;
-
-	/* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
-	__le16 action;
-
-	/* key Index selected for Tx */
-	__le16 keyindex;
-
-	/* 40, 128bit or TXWEP */
-	uint8_t keytype[4];
-	uint8_t keymaterial[4][16];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_3_get_stat {
-	__le32 xmitok;
-	__le32 rcvok;
-	__le32 xmiterror;
-	__le32 rcverror;
-	__le32 rcvnobuffer;
-	__le32 rcvcrcerror;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_get_stat {
-	__le32 txfragmentcnt;
-	__le32 mcasttxframecnt;
-	__le32 failedcnt;
-	__le32 retrycnt;
-	__le32 Multipleretrycnt;
-	__le32 rtssuccesscnt;
-	__le32 rtsfailurecnt;
-	__le32 ackfailurecnt;
-	__le32 frameduplicatecnt;
-	__le32 rxfragmentcnt;
-	__le32 mcastrxframecnt;
-	__le32 fcserrorcnt;
-	__le32 bcasttxframecnt;
-	__le32 bcastrxframecnt;
-	__le32 txbeacon;
-	__le32 rxbeacon;
-	__le32 wepundecryptable;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_snmp_mib {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 oid;
-	__le16 bufsize;
-	u8 value[128];
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_reg_map {
-	__le16 buffersize;
-	u8 regmap[128];
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_bbp_reg_map {
-	__le16 buffersize;
-	u8 regmap[128];
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_rf_reg_map {
-	__le16 buffersize;
-	u8 regmap[64];
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_reg_access {
-	__le16 action;
-	__le16 offset;
-	__le32 value;
-} __attribute__ ((packed));
-
-struct cmd_ds_bbp_reg_access {
-	__le16 action;
-	__le16 offset;
-	u8 value;
-	u8 reserved[3];
-} __attribute__ ((packed));
-
-struct cmd_ds_rf_reg_access {
-	__le16 action;
-	__le16 offset;
-	u8 value;
-	u8 reserved[3];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_radio_control {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 control;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_beacon_control {
-	__le16 action;
-	__le16 beacon_enable;
-	__le16 beacon_period;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_sleep_params {
-	struct cmd_header hdr;
-
-	/* ACT_GET/ACT_SET */
-	__le16 action;
-
-	/* Sleep clock error in ppm */
-	__le16 error;
-
-	/* Wakeup offset in usec */
-	__le16 offset;
-
-	/* Clock stabilization time in usec */
-	__le16 stabletime;
-
-	/* control periodic calibration */
-	uint8_t calcontrol;
-
-	/* control the use of external sleep clock */
-	uint8_t externalsleepclk;
-
-	/* reserved field, should be set to zero */
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_inactivity_timeout {
-	struct cmd_header hdr;
-
-	/* ACT_GET/ACT_SET */
-	__le16 action;
-
-	/* Inactivity timeout in msec */
-	__le16 timeout;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rf_channel {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 channel;
-	__le16 rftype;      /* unused */
-	__le16 reserved;    /* unused */
-	u8 channellist[32]; /* unused */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rssi {
-	/* weighting factor */
-	__le16 N;
-
-	__le16 reserved_0;
-	__le16 reserved_1;
-	__le16 reserved_2;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rssi_rsp {
-	__le16 SNR;
-	__le16 noisefloor;
-	__le16 avgSNR;
-	__le16 avgnoisefloor;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_mac_address {
-	struct cmd_header hdr;
-
-	__le16 action;
-	u8 macadd[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rf_tx_power {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 curlevel;
-	s8 maxlevel;
-	s8 minlevel;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rf_antenna {
-	__le16 action;
-
-	/* Number of antennas or 0xffff(diversity) */
-	__le16 antennamode;
-
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_monitor_mode {
-	__le16 action;
-	__le16 mode;
-} __attribute__ ((packed));
-
-struct cmd_ds_set_boot2_ver {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 version;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_fw_wake_method {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 method;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_sleep_period {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 period;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ps_mode {
-	__le16 action;
-	__le16 nullpktinterval;
-	__le16 multipledtim;
-	__le16 reserved;
-	__le16 locallisteninterval;
-} __attribute__ ((packed));
-
-struct cmd_confirm_sleep {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 nullpktinterval;
-	__le16 multipledtim;
-	__le16 reserved;
-	__le16 locallisteninterval;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_data_rate {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 reserved;
-	u8 rates[MAX_RATES];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rate_adapt_rateset {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 enablehwauto;
-	__le16 bitmap;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_start {
-	struct cmd_header hdr;
-
-	u8 ssid[IW_ESSID_MAX_SIZE];
-	u8 bsstype;
-	__le16 beaconperiod;
-	u8 dtimperiod;   /* Reserved on v9 and later */
-	struct ieee_ie_ibss_param_set ibss;
-	u8 reserved1[4];
-	struct ieee_ie_ds_param_set ds;
-	u8 reserved2[4];
-	__le16 probedelay;  /* Reserved on v9 and later */
-	__le16 capability;
-	u8 rates[MAX_RATES];
-	u8 tlv_memory_size_pad[100];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_result {
-	struct cmd_header hdr;
-
-	u8 pad[3];
-	u8 bssid[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct adhoc_bssdesc {
-	u8 bssid[ETH_ALEN];
-	u8 ssid[IW_ESSID_MAX_SIZE];
-	u8 type;
-	__le16 beaconperiod;
-	u8 dtimperiod;
-	__le64 timestamp;
-	__le64 localtime;
-	struct ieee_ie_ds_param_set ds;
-	u8 reserved1[4];
-	struct ieee_ie_ibss_param_set ibss;
-	u8 reserved2[4];
-	__le16 capability;
-	u8 rates[MAX_RATES];
-
-	/* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
-	 * Adhoc join command and will cause a binary layout mismatch with
-	 * the firmware
-	 */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_join {
-	struct cmd_header hdr;
-
-	struct adhoc_bssdesc bss;
-	__le16 failtimeout;   /* Reserved on v9 and later */
-	__le16 probedelay;    /* Reserved on v9 and later */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_stop {
-	struct cmd_header hdr;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_enable_rsn {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 enable;
-} __attribute__ ((packed));
-
-struct MrvlIEtype_keyParamSet {
-	/* type ID */
-	__le16 type;
-
-	/* length of Payload */
-	__le16 length;
-
-	/* type of key: WEP=0, TKIP=1, AES=2 */
-	__le16 keytypeid;
-
-	/* key control Info specific to a keytypeid */
-	__le16 keyinfo;
-
-	/* length of key */
-	__le16 keylen;
-
-	/* key material of size keylen */
-	u8 key[32];
-} __attribute__ ((packed));
-
-#define MAX_WOL_RULES 		16
-
-struct host_wol_rule {
-	uint8_t rule_no;
-	uint8_t rule_ops;
-	__le16 sig_offset;
-	__le16 sig_length;
-	__le16 reserve;
-	__be32 sig_mask;
-	__be32 signature;
-} __attribute__ ((packed));
-
-struct wol_config {
-	uint8_t action;
-	uint8_t pattern;
-	uint8_t no_rules_in_cmd;
-	uint8_t result;
-	struct host_wol_rule rule[MAX_WOL_RULES];
-} __attribute__ ((packed));
-
-struct cmd_ds_host_sleep {
-	struct cmd_header hdr;
-	__le32 criteria;
-	uint8_t gpio;
-	uint16_t gap;
-	struct wol_config wol_conf;
-} __attribute__ ((packed));
-
-
-
-struct cmd_ds_802_11_key_material {
-	struct cmd_header hdr;
-
-	__le16 action;
-	struct MrvlIEtype_keyParamSet keyParamSet[2];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_eeprom_access {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 offset;
-	__le16 len;
-	/* firmware says it returns a maximum of 20 bytes */
-#define LBS_EEPROM_READ_LEN 20
-	u8 value[LBS_EEPROM_READ_LEN];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_tpc_cfg {
-	struct cmd_header hdr;
-
-	__le16 action;
-	uint8_t enable;
-	int8_t P0;
-	int8_t P1;
-	int8_t P2;
-	uint8_t usesnr;
-} __attribute__ ((packed));
-
-
-struct cmd_ds_802_11_pa_cfg {
-	struct cmd_header hdr;
-
-	__le16 action;
-	uint8_t enable;
-	int8_t P0;
-	int8_t P1;
-	int8_t P2;
-} __attribute__ ((packed));
-
-
-struct cmd_ds_802_11_led_ctrl {
-	__le16 action;
-	__le16 numled;
-	u8 data[256];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_afc {
-	__le16 afc_auto;
-	union {
-		struct {
-			__le16 threshold;
-			__le16 period;
-		};
-		struct {
-			__le16 timing_offset; /* signed */
-			__le16 carrier_offset; /* signed */
-		};
-	};
-} __attribute__ ((packed));
-
-struct cmd_tx_rate_query {
-	__le16 txrate;
-} __attribute__ ((packed));
-
-struct cmd_ds_get_tsf {
-	__le64 tsfvalue;
-} __attribute__ ((packed));
-
-struct cmd_ds_bt_access {
-	__le16 action;
-	__le32 id;
-	u8 addr1[ETH_ALEN];
-	u8 addr2[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct cmd_ds_fwt_access {
-	__le16 action;
-	__le32 id;
-	u8 valid;
-	u8 da[ETH_ALEN];
-	u8 dir;
-	u8 ra[ETH_ALEN];
-	__le32 ssn;
-	__le32 dsn;
-	__le32 metric;
-	u8 rate;
-	u8 hopcount;
-	u8 ttl;
-	__le32 expiration;
-	u8 sleepmode;
-	__le32 snr;
-	__le32 references;
-	u8 prec[ETH_ALEN];
-} __attribute__ ((packed));
-
-
-struct cmd_ds_mesh_config {
-	struct cmd_header hdr;
-
-        __le16 action;
-        __le16 channel;
-        __le16 type;
-        __le16 length;
-        u8 data[128];   /* last position reserved */
-} __attribute__ ((packed));
-
-
-struct cmd_ds_mesh_access {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le32 data[32];	/* last position reserved */
-} __attribute__ ((packed));
-
-/* Number of stats counters returned by the firmware */
-#define MESH_STATS_NUM 8
-
-struct cmd_ds_command {
-	/* command header */
-	__le16 command;
-	__le16 size;
-	__le16 seqnum;
-	__le16 result;
-
-	/* command Body */
-	union {
-		struct cmd_ds_802_11_ps_mode psmode;
-		struct cmd_ds_802_11_get_stat gstat;
-		struct cmd_ds_802_3_get_stat gstat_8023;
-		struct cmd_ds_802_11_rf_antenna rant;
-		struct cmd_ds_802_11_monitor_mode monitor;
-		struct cmd_ds_802_11_rssi rssi;
-		struct cmd_ds_802_11_rssi_rsp rssirsp;
-		struct cmd_ds_mac_reg_access macreg;
-		struct cmd_ds_bbp_reg_access bbpreg;
-		struct cmd_ds_rf_reg_access rfreg;
-
-		struct cmd_ds_802_11d_domain_info domaininfo;
-		struct cmd_ds_802_11d_domain_info domaininforesp;
-
-		struct cmd_ds_802_11_tpc_cfg tpccfg;
-		struct cmd_ds_802_11_afc afc;
-		struct cmd_ds_802_11_led_ctrl ledgpio;
-
-		struct cmd_tx_rate_query txrate;
-		struct cmd_ds_bt_access bt;
-		struct cmd_ds_fwt_access fwt;
-		struct cmd_ds_get_tsf gettsf;
-		struct cmd_ds_802_11_beacon_control bcn_ctrl;
-	} params;
-} __attribute__ ((packed));
-
-#endif
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index b1d84592b959..1f6cb58dd66c 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -48,6 +48,7 @@
 MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>");
 MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("libertas_cs_helper.fw");
 
 
 
@@ -932,6 +933,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
 	card->priv = priv;
 	priv->card = card;
 	priv->hw_host_to_card = if_cs_host_to_card;
+	priv->enter_deep_sleep = NULL;
+	priv->exit_deep_sleep = NULL;
+	priv->reset_deep_sleep_wakeup = NULL;
 	priv->fw_ready = 1;
 
 	/* Now actually get the IRQ */
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 485a8d406525..09fcfad742e7 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -99,6 +99,12 @@ static struct if_sdio_model if_sdio_models[] = {
 		.firmware = "sd8688.bin",
 	},
 };
+MODULE_FIRMWARE("sd8385_helper.bin");
+MODULE_FIRMWARE("sd8385.bin");
+MODULE_FIRMWARE("sd8686_helper.bin");
+MODULE_FIRMWARE("sd8686.bin");
+MODULE_FIRMWARE("sd8688_helper.bin");
+MODULE_FIRMWARE("sd8688.bin");
 
 struct if_sdio_packet {
 	struct if_sdio_packet	*next;
@@ -831,6 +837,58 @@ out:
 	return ret;
 }
 
+static int if_sdio_enter_deep_sleep(struct lbs_private *priv)
+{
+	int ret = -1;
+	struct cmd_header cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	lbs_deb_sdio("send DEEP_SLEEP command\n");
+	ret = __lbs_cmd(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd),
+			lbs_cmd_copyback, (unsigned long) &cmd);
+	if (ret)
+		lbs_pr_err("DEEP_SLEEP cmd failed\n");
+
+	mdelay(200);
+	return ret;
+}
+
+static int if_sdio_exit_deep_sleep(struct lbs_private *priv)
+{
+	struct if_sdio_card *card = priv->card;
+	int ret = -1;
+
+	lbs_deb_enter(LBS_DEB_SDIO);
+	sdio_claim_host(card->func);
+
+	sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret);
+	if (ret)
+		lbs_pr_err("sdio_writeb failed!\n");
+
+	sdio_release_host(card->func);
+	lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
+	return ret;
+}
+
+static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv)
+{
+	struct if_sdio_card *card = priv->card;
+	int ret = -1;
+
+	lbs_deb_enter(LBS_DEB_SDIO);
+	sdio_claim_host(card->func);
+
+	sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret);
+	if (ret)
+		lbs_pr_err("sdio_writeb failed!\n");
+
+	sdio_release_host(card->func);
+	lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
+	return ret;
+
+}
+
 /*******************************************************************/
 /* SDIO callbacks                                                  */
 /*******************************************************************/
@@ -859,6 +917,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
 	 * Ignore the define name, this really means the card has
 	 * successfully received the command.
 	 */
+	card->priv->is_activity_detected = 1;
 	if (cause & IF_SDIO_H_INT_DNLD)
 		lbs_host_to_card_done(card->priv);
 
@@ -998,6 +1057,9 @@ static int if_sdio_probe(struct sdio_func *func,
 
 	priv->card = card;
 	priv->hw_host_to_card = if_sdio_host_to_card;
+	priv->enter_deep_sleep = if_sdio_enter_deep_sleep;
+	priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
+	priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
 
 	priv->fw_ready = 1;
 
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
index 60c9b2fcef03..12179c1dc9c9 100644
--- a/drivers/net/wireless/libertas/if_sdio.h
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -51,5 +51,6 @@
 #define IF_SDIO_EVENT           0x80fc
 
 #define IF_SDIO_BLOCK_SIZE	256
-
+#define CONFIGURATION_REG               0x03
+#define HOST_POWER_UP                   (0x1U << 1)
 #endif
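
The two definitions added above are what the new deep-sleep callbacks in if_sdio.c write: HOST_POWER_UP into CONFIGURATION_REG to leave deep sleep, and 0 to re-arm the next wakeup. A minimal standalone sketch of that bit handling, with the SDIO byte write faked out (the EX_-prefixed names are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define EX_CONFIGURATION_REG	0x03
#define EX_HOST_POWER_UP	(0x1U << 1)

/* stand-in for the card's register file; the driver uses sdio_writeb() */
static uint8_t ex_regs[256];

static void ex_writeb(uint8_t val, unsigned int reg)
{
	ex_regs[reg] = val;
}

int main(void)
{
	ex_writeb(EX_HOST_POWER_UP, EX_CONFIGURATION_REG);	/* exit deep sleep */
	printf("cfg = 0x%02x\n", ex_regs[EX_CONFIGURATION_REG]);

	ex_writeb(0, EX_CONFIGURATION_REG);	/* re-arm for the next wakeup */
	printf("cfg = 0x%02x\n", ex_regs[EX_CONFIGURATION_REG]);
	return 0;
}
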
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 5b3672c4d0cc..bf4bfbae6227 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -32,12 +32,6 @@
 #include "dev.h"
 #include "if_spi.h"
 
-struct if_spi_packet {
-	struct list_head		list;
-	u16				blen;
-	u8				buffer[0] __attribute__((aligned(4)));
-};
-
 struct if_spi_card {
 	struct spi_device		*spi;
 	struct lbs_private		*priv;
@@ -66,33 +60,10 @@ struct if_spi_card {
 	struct semaphore		spi_thread_terminated;
 
 	u8				cmd_buffer[IF_SPI_CMD_BUF_SIZE];
-
-	/* A buffer of incoming packets from libertas core.
-	 * Since we can't sleep in hw_host_to_card, we have to buffer
-	 * them. */
-	struct list_head		cmd_packet_list;
-	struct list_head		data_packet_list;
-
-	/* Protects cmd_packet_list and data_packet_list */
-	spinlock_t			buffer_lock;
 };
 
 static void free_if_spi_card(struct if_spi_card *card)
 {
-	struct list_head *cursor, *next;
-	struct if_spi_packet *packet;
-
-	BUG_ON(card->run_thread);
-	list_for_each_safe(cursor, next, &card->cmd_packet_list) {
-		packet = container_of(cursor, struct if_spi_packet, list);
-		list_del(&packet->list);
-		kfree(packet);
-	}
-	list_for_each_safe(cursor, next, &card->data_packet_list) {
-		packet = container_of(cursor, struct if_spi_packet, list);
-		list_del(&packet->list);
-		kfree(packet);
-	}
 	spi_set_drvdata(card->spi, NULL);
 	kfree(card);
 }
@@ -774,40 +745,6 @@ out:
 	return err;
 }
 
-/* Move data or a command from the host to the card. */
-static void if_spi_h2c(struct if_spi_card *card,
-			struct if_spi_packet *packet, int type)
-{
-	int err = 0;
-	u16 int_type, port_reg;
-
-	switch (type) {
-	case MVMS_DAT:
-		int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
-		port_reg = IF_SPI_DATA_RDWRPORT_REG;
-		break;
-	case MVMS_CMD:
-		int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
-		port_reg = IF_SPI_CMD_RDWRPORT_REG;
-		break;
-	default:
-		lbs_pr_err("can't transfer buffer of type %d\n", type);
-		err = -EINVAL;
-		goto out;
-	}
-
-	/* Write the data to the card */
-	err = spu_write(card, port_reg, packet->buffer, packet->blen);
-	if (err)
-		goto out;
-
-out:
-	kfree(packet);
-
-	if (err)
-		lbs_pr_err("%s: error %d\n", __func__, err);
-}
-
 /* Inform the host about a card event */
 static void if_spi_e2h(struct if_spi_card *card)
 {
@@ -837,8 +774,6 @@ static int lbs_spi_thread(void *data)
 	int err;
 	struct if_spi_card *card = data;
 	u16 hiStatus;
-	unsigned long flags;
-	struct if_spi_packet *packet;
 
 	while (1) {
 		/* Wait to be woken up by one of two things.  First, our ISR
@@ -877,43 +812,9 @@ static int lbs_spi_thread(void *data)
 		if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
 		   (card->priv->psstate != PS_STATE_FULL_POWER &&
 		    (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
-			/* This means two things. First of all,
-			 * if there was a previous command sent, the card has
-			 * successfully received it.
-			 * Secondly, it is now ready to download another
-			 * command.
-			 */
 			lbs_host_to_card_done(card->priv);
-
-			/* Do we have any command packets from the host to
-			 * send? */
-			packet = NULL;
-			spin_lock_irqsave(&card->buffer_lock, flags);
-			if (!list_empty(&card->cmd_packet_list)) {
-				packet = (struct if_spi_packet *)(card->
-						cmd_packet_list.next);
-				list_del(&packet->list);
-			}
-			spin_unlock_irqrestore(&card->buffer_lock, flags);
-
-			if (packet)
-				if_spi_h2c(card, packet, MVMS_CMD);
 		}
-		if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
-			/* Do we have any data packets from the host to
-			 * send? */
-			packet = NULL;
-			spin_lock_irqsave(&card->buffer_lock, flags);
-			if (!list_empty(&card->data_packet_list)) {
-				packet = (struct if_spi_packet *)(card->
-						data_packet_list.next);
-				list_del(&packet->list);
-			}
-			spin_unlock_irqrestore(&card->buffer_lock, flags);
 
-			if (packet)
-				if_spi_h2c(card, packet, MVMS_DAT);
-		}
 		if (hiStatus & IF_SPI_HIST_CARD_EVENT)
 			if_spi_e2h(card);
 
@@ -942,40 +843,18 @@ static int if_spi_host_to_card(struct lbs_private *priv,
 				u8 type, u8 *buf, u16 nb)
 {
 	int err = 0;
-	unsigned long flags;
 	struct if_spi_card *card = priv->card;
-	struct if_spi_packet *packet;
-	u16 blen;
 
 	lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
 
-	if (nb == 0) {
-		lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
-		err = -EINVAL;
-		goto out;
-	}
-	blen = ALIGN(nb, 4);
-	packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
-	if (!packet) {
-		err = -ENOMEM;
-		goto out;
-	}
-	packet->blen = blen;
-	memcpy(packet->buffer, buf, nb);
-	memset(packet->buffer + nb, 0, blen - nb);
+	nb = ALIGN(nb, 4);
 
 	switch (type) {
 	case MVMS_CMD:
-		priv->dnld_sent = DNLD_CMD_SENT;
-		spin_lock_irqsave(&card->buffer_lock, flags);
-		list_add_tail(&packet->list, &card->cmd_packet_list);
-		spin_unlock_irqrestore(&card->buffer_lock, flags);
+		err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
 		break;
 	case MVMS_DAT:
-		priv->dnld_sent = DNLD_DATA_SENT;
-		spin_lock_irqsave(&card->buffer_lock, flags);
-		list_add_tail(&packet->list, &card->data_packet_list);
-		spin_unlock_irqrestore(&card->buffer_lock, flags);
+		err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
 		break;
 	default:
 		lbs_pr_err("can't transfer buffer of type %d", type);
@@ -983,9 +862,6 @@ static int if_spi_host_to_card(struct lbs_private *priv,
 		break;
 	}
 
-	/* Wake up the spi thread */
-	up(&card->spi_ready);
-out:
 	lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
 	return err;
 }
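
The rewritten if_spi_host_to_card() above rounds the transfer length up to a multiple of four before handing the buffer to spu_write(). For reference, here is a standalone sketch of that rounding, a reimplementation of what the kernel's ALIGN() macro does for power-of-two alignments (illustrative only, not the kernel macro itself).

#include <assert.h>
#include <stdint.h>

/* round x up to the next multiple of a, where a is a power of two */
#define EX_ALIGN(x, a)	(((x) + ((a) - 1)) & ~((uintmax_t)(a) - 1))

int main(void)
{
	assert(EX_ALIGN(1, 4) == 4);	/* 1-byte payload is padded to 4 */
	assert(EX_ALIGN(4, 4) == 4);	/* already-aligned lengths are unchanged */
	assert(EX_ALIGN(13, 4) == 16);
	return 0;
}
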
@@ -1026,6 +902,10 @@ static int if_spi_calculate_fw_names(u16 card_id,
 		 chip_id_to_device_name[i].name);
 	return 0;
 }
+MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
+MODULE_FIRMWARE("libertas/gspi8385.bin");
+MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
+MODULE_FIRMWARE("libertas/gspi8686.bin");
 
 static int __devinit if_spi_probe(struct spi_device *spi)
 {
@@ -1062,9 +942,6 @@ static int __devinit if_spi_probe(struct spi_device *spi)
 
 	sema_init(&card->spi_ready, 0);
 	sema_init(&card->spi_thread_terminated, 0);
-	INIT_LIST_HEAD(&card->cmd_packet_list);
-	INIT_LIST_HEAD(&card->data_packet_list);
-	spin_lock_init(&card->buffer_lock);
 
 	/* Initialize the SPI Interface Unit */
 	err = spu_init(card, pdata->use_dummy_writes);
@@ -1117,6 +994,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
 	card->priv = priv;
 	priv->card = card;
 	priv->hw_host_to_card = if_spi_host_to_card;
+	priv->enter_deep_sleep = NULL;
+	priv->exit_deep_sleep = NULL;
+	priv->reset_deep_sleep_wakeup = NULL;
 	priv->fw_ready = 1;
 
 	/* Initialize interrupt handling stuff. */
@@ -1138,6 +1018,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
 		goto terminate_thread;
 	}
 
+	/* poke the IRQ handler so that we don't miss the first interrupt */
+	up(&card->spi_ready);
+
 	/* Start the card.
 	 * This will call register_netdev, and we'll start
 	 * getting interrupts... */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 3fac4efa5ac8..65e174595d12 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -28,6 +28,8 @@
 static char *lbs_fw_name = "usb8388.bin";
 module_param_named(fw_name, lbs_fw_name, charp, 0644);
 
+MODULE_FIRMWARE("usb8388.bin");
+
 static struct usb_device_id if_usb_table[] = {
 	/* Enter the device signature inside */
 	{ USB_DEVICE(0x1286, 0x2001) },
@@ -300,6 +302,9 @@ static int if_usb_probe(struct usb_interface *intf,
 	cardp->priv->fw_ready = 1;
 
 	priv->hw_host_to_card = if_usb_host_to_card;
+	priv->enter_deep_sleep = NULL;
+	priv->exit_deep_sleep = NULL;
+	priv->reset_deep_sleep_wakeup = NULL;
 #ifdef CONFIG_OLPC
 	if (machine_is_olpc())
 		priv->reset_card = if_usb_reset_olpc_card;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 87b4e497faa2..db38a5a719fa 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -14,11 +14,13 @@
 #include <linux/stddef.h>
 #include <linux/ieee80211.h>
 #include <net/iw_handler.h>
+#include <net/cfg80211.h>
 
 #include "host.h"
 #include "decl.h"
 #include "dev.h"
 #include "wext.h"
+#include "cfg.h"
 #include "debugfs.h"
 #include "scan.h"
 #include "assoc.h"
@@ -43,119 +45,6 @@ module_param_named(libertas_debug, lbs_debug, int, 0644);
 struct cmd_confirm_sleep confirm_sleep;
 
 
-#define LBS_TX_PWR_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_US_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_JP_DEFAULT		16	/*50mW */
-#define LBS_TX_PWR_FR_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_EMEA_DEFAULT	20	/*100mW */
-
-/* Format { channel, frequency (MHz), maxtxpower } */
-/* band: 'B/G', region: USA FCC/Canada IC */
-static struct chan_freq_power channel_freq_power_US_BG[] = {
-	{1, 2412, LBS_TX_PWR_US_DEFAULT},
-	{2, 2417, LBS_TX_PWR_US_DEFAULT},
-	{3, 2422, LBS_TX_PWR_US_DEFAULT},
-	{4, 2427, LBS_TX_PWR_US_DEFAULT},
-	{5, 2432, LBS_TX_PWR_US_DEFAULT},
-	{6, 2437, LBS_TX_PWR_US_DEFAULT},
-	{7, 2442, LBS_TX_PWR_US_DEFAULT},
-	{8, 2447, LBS_TX_PWR_US_DEFAULT},
-	{9, 2452, LBS_TX_PWR_US_DEFAULT},
-	{10, 2457, LBS_TX_PWR_US_DEFAULT},
-	{11, 2462, LBS_TX_PWR_US_DEFAULT}
-};
-
-/* band: 'B/G', region: Europe ETSI */
-static struct chan_freq_power channel_freq_power_EU_BG[] = {
-	{1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
-	{2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
-	{3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
-	{4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
-	{5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
-	{6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
-	{7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
-	{8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
-	{9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
-	{10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
-	{11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
-	{12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
-	{13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
-};
-
-/* band: 'B/G', region: Spain */
-static struct chan_freq_power channel_freq_power_SPN_BG[] = {
-	{10, 2457, LBS_TX_PWR_DEFAULT},
-	{11, 2462, LBS_TX_PWR_DEFAULT}
-};
-
-/* band: 'B/G', region: France */
-static struct chan_freq_power channel_freq_power_FR_BG[] = {
-	{10, 2457, LBS_TX_PWR_FR_DEFAULT},
-	{11, 2462, LBS_TX_PWR_FR_DEFAULT},
-	{12, 2467, LBS_TX_PWR_FR_DEFAULT},
-	{13, 2472, LBS_TX_PWR_FR_DEFAULT}
-};
-
-/* band: 'B/G', region: Japan */
-static struct chan_freq_power channel_freq_power_JPN_BG[] = {
-	{1, 2412, LBS_TX_PWR_JP_DEFAULT},
-	{2, 2417, LBS_TX_PWR_JP_DEFAULT},
-	{3, 2422, LBS_TX_PWR_JP_DEFAULT},
-	{4, 2427, LBS_TX_PWR_JP_DEFAULT},
-	{5, 2432, LBS_TX_PWR_JP_DEFAULT},
-	{6, 2437, LBS_TX_PWR_JP_DEFAULT},
-	{7, 2442, LBS_TX_PWR_JP_DEFAULT},
-	{8, 2447, LBS_TX_PWR_JP_DEFAULT},
-	{9, 2452, LBS_TX_PWR_JP_DEFAULT},
-	{10, 2457, LBS_TX_PWR_JP_DEFAULT},
-	{11, 2462, LBS_TX_PWR_JP_DEFAULT},
-	{12, 2467, LBS_TX_PWR_JP_DEFAULT},
-	{13, 2472, LBS_TX_PWR_JP_DEFAULT},
-	{14, 2484, LBS_TX_PWR_JP_DEFAULT}
-};
-
-/**
- * the structure for channel, frequency and power
- */
-struct region_cfp_table {
-	u8 region;
-	struct chan_freq_power *cfp_BG;
-	int cfp_no_BG;
-};
-
-/**
- * the structure for the mapping between region and CFP
- */
-static struct region_cfp_table region_cfp_table[] = {
-	{0x10,			/*US FCC */
-	 channel_freq_power_US_BG,
-	 ARRAY_SIZE(channel_freq_power_US_BG),
-	 }
-	,
-	{0x20,			/*CANADA IC */
-	 channel_freq_power_US_BG,
-	 ARRAY_SIZE(channel_freq_power_US_BG),
-	 }
-	,
-	{0x30, /*EU*/ channel_freq_power_EU_BG,
-	 ARRAY_SIZE(channel_freq_power_EU_BG),
-	 }
-	,
-	{0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
-	 ARRAY_SIZE(channel_freq_power_SPN_BG),
-	 }
-	,
-	{0x32, /*FRANCE*/ channel_freq_power_FR_BG,
-	 ARRAY_SIZE(channel_freq_power_FR_BG),
-	 }
-	,
-	{0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
-	 ARRAY_SIZE(channel_freq_power_JPN_BG),
-	 }
-	,
-/*Add new region here */
-};
-
 /**
  * the table to keep region code
  */
@@ -163,13 +52,6 @@ u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
     { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
 
 /**
- * 802.11b/g supported bitrates (in 500Kb/s units)
- */
-u8 lbs_bg_rates[MAX_RATES] =
-    { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
-0x00, 0x00 };
-
-/**
  * FW rate table.  FW refers to rates by their index in this table, not by the
  * rate value itself.  Values of 0x00 are
  * reserved positions.
@@ -212,107 +94,9 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
 	return 0;
 }
 
-/**
- * Attributes exported through sysfs
- */
-
-/**
- * @brief Get function for sysfs attribute anycast_mask
- */
-static ssize_t lbs_anycast_get(struct device *dev,
-		struct device_attribute *attr, char * buf)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_access mesh_access;
-	int ret;
-
-	memset(&mesh_access, 0, sizeof(mesh_access));
-
-	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_ANYCAST, &mesh_access);
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0]));
-}
-
-/**
- * @brief Set function for sysfs attribute anycast_mask
- */
-static ssize_t lbs_anycast_set(struct device *dev,
-		struct device_attribute *attr, const char * buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_access mesh_access;
-	uint32_t datum;
-	int ret;
-
-	memset(&mesh_access, 0, sizeof(mesh_access));
-	sscanf(buf, "%x", &datum);
-	mesh_access.data[0] = cpu_to_le32(datum);
-
-	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_ANYCAST, &mesh_access);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute prb_rsp_limit
- */
-static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_access mesh_access;
-	int ret;
-	u32 retry_limit;
-
-	memset(&mesh_access, 0, sizeof(mesh_access));
-	mesh_access.data[0] = cpu_to_le32(CMD_ACT_GET);
-
-	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
-			&mesh_access);
-	if (ret)
-		return ret;
-
-	retry_limit = le32_to_cpu(mesh_access.data[1]);
-	return snprintf(buf, 10, "%d\n", retry_limit);
-}
-
-/**
- * @brief Set function for sysfs attribute prb_rsp_limit
- */
-static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_access mesh_access;
-	int ret;
-	unsigned long retry_limit;
-
-	memset(&mesh_access, 0, sizeof(mesh_access));
-	mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
-
-	if (!strict_strtoul(buf, 10, &retry_limit))
-		return -ENOTSUPP;
-	if (retry_limit > 15)
-		return -ENOTSUPP;
-
-	mesh_access.data[1] = cpu_to_le32(retry_limit);
-
-	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
-			&mesh_access);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
 
 static int lbs_add_rtap(struct lbs_private *priv);
 static void lbs_remove_rtap(struct lbs_private *priv);
-static int lbs_add_mesh(struct lbs_private *priv);
-static void lbs_remove_mesh(struct lbs_private *priv);
 
 
 /**
@@ -378,74 +162,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
 static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set );
 
 /**
- * Get function for sysfs attribute mesh
- */
-static ssize_t lbs_mesh_get(struct device *dev,
-		struct device_attribute *attr, char * buf)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
-}
-
-/**
- *  Set function for sysfs attribute mesh
- */
-static ssize_t lbs_mesh_set(struct device *dev,
-		struct device_attribute *attr, const char * buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	int enable;
-	int ret, action = CMD_ACT_MESH_CONFIG_STOP;
-
-	sscanf(buf, "%x", &enable);
-	enable = !!enable;
-	if (enable == !!priv->mesh_dev)
-		return count;
-	if (enable)
-		action = CMD_ACT_MESH_CONFIG_START;
-	ret = lbs_mesh_config(priv, action, priv->curbssparams.channel);
-	if (ret)
-		return ret;
-
-	if (enable)
-		lbs_add_mesh(priv);
-	else
-		lbs_remove_mesh(priv);
-
-	return count;
-}
-
-/**
- * lbs_mesh attribute to be exported per ethX interface
- * through sysfs (/sys/class/net/ethX/lbs_mesh)
- */
-static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set);
-
-/**
- * anycast_mask attribute to be exported per mshX interface
- * through sysfs (/sys/class/net/mshX/anycast_mask)
- */
-static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set);
-
-/**
- * prb_rsp_limit attribute to be exported per mshX interface
- * through sysfs (/sys/class/net/mshX/prb_rsp_limit)
- */
-static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get,
-		lbs_prb_rsp_limit_set);
-
-static struct attribute *lbs_mesh_sysfs_entries[] = {
-	&dev_attr_anycast_mask.attr,
-	&dev_attr_prb_rsp_limit.attr,
-	NULL,
-};
-
-static struct attribute_group lbs_mesh_attr_group = {
-	.attrs = lbs_mesh_sysfs_entries,
-};
-
-/**
- *  @brief This function opens the ethX or mshX interface
+ *  @brief This function opens the ethX interface
  *
  *  @param dev     A pointer to net_device structure
  *  @return 	   0 or -EBUSY if monitor mode active
@@ -464,18 +181,12 @@ static int lbs_dev_open(struct net_device *dev)
 		goto out;
 	}
 
-	if (dev == priv->mesh_dev) {
-		priv->mesh_open = 1;
-		priv->mesh_connect_status = LBS_CONNECTED;
-		netif_carrier_on(dev);
-	} else {
-		priv->infra_open = 1;
+	priv->infra_open = 1;
 
-		if (priv->connect_status == LBS_CONNECTED)
-			netif_carrier_on(dev);
-		else
-			netif_carrier_off(dev);
-	}
+	if (priv->connect_status == LBS_CONNECTED)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 
 	if (!priv->tx_pending_len)
 		netif_wake_queue(dev);
@@ -487,33 +198,6 @@ static int lbs_dev_open(struct net_device *dev)
 }
 
 /**
- *  @brief This function closes the mshX interface
- *
- *  @param dev     A pointer to net_device structure
- *  @return 	   0
- */
-static int lbs_mesh_stop(struct net_device *dev)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_MESH);
-	spin_lock_irq(&priv->driver_lock);
-
-	priv->mesh_open = 0;
-	priv->mesh_connect_status = LBS_DISCONNECTED;
-
-	netif_stop_queue(dev);
-	netif_carrier_off(dev);
-
-	spin_unlock_irq(&priv->driver_lock);
-
-	schedule_work(&priv->mcast_work);
-
-	lbs_deb_leave(LBS_DEB_MESH);
-	return 0;
-}
-
-/**
  *  @brief This function closes the ethX interface
  *
  *  @param dev     A pointer to net_device structure
@@ -574,15 +258,17 @@ void lbs_host_to_card_done(struct lbs_private *priv)
 	priv->dnld_sent = DNLD_RES_RECEIVED;
 
 	/* Wake main thread if commands are pending */
-	if (!priv->cur_cmd || priv->tx_pending_len > 0)
-		wake_up_interruptible(&priv->waitq);
+	if (!priv->cur_cmd || priv->tx_pending_len > 0) {
+		if (!priv->wakeup_dev_required)
+			wake_up_interruptible(&priv->waitq);
+	}
 
 	spin_unlock_irqrestore(&priv->driver_lock, flags);
 	lbs_deb_leave(LBS_DEB_THREAD);
 }
 EXPORT_SYMBOL_GPL(lbs_host_to_card_done);
 
-static int lbs_set_mac_address(struct net_device *dev, void *addr)
+int lbs_set_mac_address(struct net_device *dev, void *addr)
 {
 	int ret = 0;
 	struct lbs_private *priv = dev->ml_priv;
@@ -716,7 +402,7 @@ static void lbs_set_mcast_worker(struct work_struct *work)
 	lbs_deb_leave(LBS_DEB_NET);
 }
 
-static void lbs_set_multicast_list(struct net_device *dev)
+void lbs_set_multicast_list(struct net_device *dev)
 {
 	struct lbs_private *priv = dev->ml_priv;
 
@@ -770,7 +456,8 @@ static int lbs_thread(void *data)
 			shouldsleep = 0;	/* We have a command response */
 		else if (priv->cur_cmd)
 			shouldsleep = 1;	/* Can't send a command; one already running */
-		else if (!list_empty(&priv->cmdpendingq))
+		else if (!list_empty(&priv->cmdpendingq) &&
+					!(priv->wakeup_dev_required))
 			shouldsleep = 0;	/* We have a command to send */
 		else if (__kfifo_len(priv->event_fifo))
 			shouldsleep = 0;	/* We have an event to process */
@@ -822,6 +509,26 @@ static int lbs_thread(void *data)
 		}
 		spin_unlock_irq(&priv->driver_lock);
 
+		/* Process hardware events, e.g. card removed, link lost */
+		spin_lock_irq(&priv->driver_lock);
+		while (__kfifo_len(priv->event_fifo)) {
+			u32 event;
+			__kfifo_get(priv->event_fifo, (unsigned char *) &event,
+				sizeof(event));
+			spin_unlock_irq(&priv->driver_lock);
+			lbs_process_event(priv, event);
+			spin_lock_irq(&priv->driver_lock);
+		}
+		spin_unlock_irq(&priv->driver_lock);
+
+		if (priv->wakeup_dev_required) {
+			lbs_deb_thread("Waking up device...\n");
+			/* Wake up device */
+			if (priv->exit_deep_sleep(priv))
+				lbs_deb_thread("Wakeup device failed\n");
+			continue;
+		}
+
 		/* command timeout stuff */
 		if (priv->cmd_timed_out && priv->cur_cmd) {
 			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
@@ -849,18 +556,7 @@ static int lbs_thread(void *data)
 		}
 		priv->cmd_timed_out = 0;
 
-		/* Process hardware events, e.g. card removed, link lost */
-		spin_lock_irq(&priv->driver_lock);
-		while (__kfifo_len(priv->event_fifo)) {
-			u32 event;
 
-			__kfifo_get(priv->event_fifo, (unsigned char *) &event,
-				sizeof(event));
-			spin_unlock_irq(&priv->driver_lock);
-			lbs_process_event(priv, event);
-			spin_lock_irq(&priv->driver_lock);
-		}
-		spin_unlock_irq(&priv->driver_lock);
 
 		if (!priv->fw_ready)
 			continue;
@@ -894,6 +590,9 @@ static int lbs_thread(void *data)
 		    (priv->psstate == PS_STATE_PRE_SLEEP))
 			continue;
 
+		if (priv->is_deep_sleep)
+			continue;
+
 		/* Execute the next command */
 		if (!priv->dnld_sent && !priv->cur_cmd)
 			lbs_execute_next_command(priv);
@@ -928,6 +627,7 @@ static int lbs_thread(void *data)
 	}
 
 	del_timer(&priv->command_timer);
+	del_timer(&priv->auto_deepsleep_timer);
 	wake_up_all(&priv->cmd_pending);
 
 	lbs_deb_leave(LBS_DEB_THREAD);
@@ -1050,6 +750,62 @@ out:
 	lbs_deb_leave(LBS_DEB_CMD);
 }
 
+/**
+ *  This function puts the device back into deep sleep mode when the timer
+ *  expires and no activity (command, event, data etc.) is detected.
+ */
+static void auto_deepsleep_timer_fn(unsigned long data)
+{
+	struct lbs_private *priv = (struct lbs_private *)data;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (priv->is_activity_detected) {
+		priv->is_activity_detected = 0;
+	} else {
+		if (priv->is_auto_deep_sleep_enabled &&
+				(!priv->wakeup_dev_required) &&
+				(priv->connect_status != LBS_CONNECTED)) {
+			lbs_deb_main("Entering auto deep sleep mode...\n");
+			ret = lbs_prepare_and_send_command(priv,
+					CMD_802_11_DEEP_SLEEP, 0,
+					0, 0, NULL);
+			if (ret)
+				lbs_pr_err("Enter Deep Sleep command failed\n");
+		}
+	}
+	mod_timer(&priv->auto_deepsleep_timer , jiffies +
+				(priv->auto_deep_sleep_timeout * HZ)/1000);
+	lbs_deb_leave(LBS_DEB_CMD);
+}
+
+int lbs_enter_auto_deep_sleep(struct lbs_private *priv)
+{
+	lbs_deb_enter(LBS_DEB_SDIO);
+
+	priv->is_auto_deep_sleep_enabled = 1;
+	if (priv->is_deep_sleep)
+		priv->wakeup_dev_required = 1;
+	mod_timer(&priv->auto_deepsleep_timer ,
+			jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000);
+
+	lbs_deb_leave(LBS_DEB_SDIO);
+	return 0;
+}
+
+int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
+{
+	lbs_deb_enter(LBS_DEB_SDIO);
+
+	priv->is_auto_deep_sleep_enabled = 0;
+	priv->auto_deep_sleep_timeout = 0;
+	del_timer(&priv->auto_deepsleep_timer);
+
+	lbs_deb_leave(LBS_DEB_SDIO);
+	return 0;
+}
+
 static void lbs_sync_channel_worker(struct work_struct *work)
 {
 	struct lbs_private *priv = container_of(work, struct lbs_private,
@@ -1092,18 +848,24 @@ static int lbs_init_adapter(struct lbs_private *priv)
 	priv->mesh_connect_status = LBS_DISCONNECTED;
 	priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
 	priv->mode = IW_MODE_INFRA;
-	priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
+	priv->channel = DEFAULT_AD_HOC_CHANNEL;
 	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
 	priv->radio_on = 1;
 	priv->enablehwauto = 1;
 	priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
 	priv->psmode = LBS802_11POWERMODECAM;
 	priv->psstate = PS_STATE_FULL_POWER;
+	priv->is_deep_sleep = 0;
+	priv->is_auto_deep_sleep_enabled = 0;
+	priv->wakeup_dev_required = 0;
+	init_waitqueue_head(&priv->ds_awake_q);
 
 	mutex_init(&priv->lock);
 
 	setup_timer(&priv->command_timer, command_timer_fn,
 		(unsigned long)priv);
+	setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
+			(unsigned long)priv);
 
 	INIT_LIST_HEAD(&priv->cmdfreeq);
 	INIT_LIST_HEAD(&priv->cmdpendingq);
@@ -1142,6 +904,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
 	if (priv->event_fifo)
 		kfifo_free(priv->event_fifo);
 	del_timer(&priv->command_timer);
+	del_timer(&priv->auto_deepsleep_timer);
 	kfree(priv->networks);
 	priv->networks = NULL;
 
@@ -1168,31 +931,41 @@ static const struct net_device_ops lbs_netdev_ops = {
  */
 struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
 {
-	struct net_device *dev = NULL;
+	struct net_device *dev;
+	struct wireless_dev *wdev;
 	struct lbs_private *priv = NULL;
 
 	lbs_deb_enter(LBS_DEB_MAIN);
 
 	/* Allocate an Ethernet device and register it */
-	dev = alloc_etherdev(sizeof(struct lbs_private));
-	if (!dev) {
-		lbs_pr_err("init wlanX device failed\n");
+	wdev = lbs_cfg_alloc(dmdev);
+	if (IS_ERR(wdev)) {
+		lbs_pr_err("cfg80211 init failed\n");
 		goto done;
 	}
-	priv = netdev_priv(dev);
-	dev->ml_priv = priv;
+	/* TODO? */
+	wdev->iftype = NL80211_IFTYPE_STATION;
+	priv = wdev_priv(wdev);
+	priv->wdev = wdev;
 
 	if (lbs_init_adapter(priv)) {
 		lbs_pr_err("failed to initialize adapter structure.\n");
-		goto err_init_adapter;
+		goto err_wdev;
 	}
 
+	//TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
+	dev = alloc_netdev(0, "wlan%d", ether_setup);
+	if (!dev) {
+		dev_err(dmdev, "no memory for network device instance\n");
+		goto err_adapter;
+	}
+
+	dev->ieee80211_ptr = wdev;
+	dev->ml_priv = priv;
+	SET_NETDEV_DEV(dev, dmdev);
+	wdev->netdev = dev;
 	priv->dev = dev;
-	priv->card = card;
-	priv->mesh_open = 0;
-	priv->infra_open = 0;
 
-	/* Setup the OS Interface to our functions */
  	dev->netdev_ops = &lbs_netdev_ops;
 	dev->watchdog_timeo = 5 * HZ;
 	dev->ethtool_ops = &lbs_ethtool_ops;
@@ -1201,7 +974,13 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
 #endif
 	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
 
-	SET_NETDEV_DEV(dev, dmdev);
+
+	// TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ??
+
+
+	priv->card = card;
+	priv->infra_open = 0;
+
 
 	priv->rtap_net_dev = NULL;
 	strcpy(dev->name, "wlan%d");
@@ -1211,7 +990,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
 	priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
 	if (IS_ERR(priv->main_thread)) {
 		lbs_deb_thread("Error creating main thread.\n");
-		goto err_init_adapter;
+		goto err_ndev;
 	}
 
 	priv->work_thread = create_singlethread_workqueue("lbs_worker");
@@ -1220,6 +999,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
 	INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
 	INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
 
+	priv->mesh_open = 0;
 	sprintf(priv->mesh_ssid, "mesh");
 	priv->mesh_ssid_len = 4;
 
@@ -1228,9 +1008,15 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
 
 	goto done;
 
-err_init_adapter:
-	lbs_free_adapter(priv);
+ err_ndev:
 	free_netdev(dev);
+
+ err_adapter:
+	lbs_free_adapter(priv);
+
+ err_wdev:
+	lbs_cfg_free(priv);
+
 	priv = NULL;
 
 done:
@@ -1243,7 +1029,6 @@ EXPORT_SYMBOL_GPL(lbs_add_card);
 void lbs_remove_card(struct lbs_private *priv)
 {
 	struct net_device *dev = priv->dev;
-	union iwreq_data wrqu;
 
 	lbs_deb_enter(LBS_DEB_MAIN);
 
@@ -1268,15 +1053,19 @@ void lbs_remove_card(struct lbs_private *priv)
 		lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
 	}
 
-	memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+	lbs_send_disconnect_notification(priv);
+
+	if (priv->is_deep_sleep) {
+		priv->is_deep_sleep = 0;
+		wake_up_interruptible(&priv->ds_awake_q);
+	}
 
 	/* Stop the thread servicing the interrupts */
 	priv->surpriseremoved = 1;
 	kthread_stop(priv->main_thread);
 
 	lbs_free_adapter(priv);
+	lbs_cfg_free(priv);
 
 	priv->dev = NULL;
 	free_netdev(dev);
@@ -1298,60 +1087,19 @@ int lbs_start_card(struct lbs_private *priv)
 	if (ret)
 		goto done;
 
-	/* init 802.11d */
-	lbs_init_11d(priv);
-
-	if (register_netdev(dev)) {
-		lbs_pr_err("cannot register ethX device\n");
+	if (lbs_cfg_register(priv)) {
+		lbs_pr_err("cannot register device\n");
 		goto done;
 	}
 
 	lbs_update_channel(priv);
 
-	/* Check mesh FW version and appropriately send the mesh start
-	 * command
+	/*
+	 * While rtap isn't related to mesh, only mesh-enabled
+	 * firmware implements the rtap functionality via
+	 * CMD_802_11_MONITOR_MODE.
 	 */
-	if (priv->mesh_fw_ver == MESH_FW_OLD) {
-		/* Enable mesh, if supported, and work out which TLV it uses.
-		   0x100 + 291 is an unofficial value used in 5.110.20.pXX
-		   0x100 + 37 is the official value used in 5.110.21.pXX
-		   but we check them in that order because 20.pXX doesn't
-		   give an error -- it just silently fails. */
-
-		/* 5.110.20.pXX firmware will fail the command if the channel
-		   doesn't match the existing channel. But only if the TLV
-		   is correct. If the channel is wrong, _BOTH_ versions will
-		   give an error to 0x100+291, and allow 0x100+37 to succeed.
-		   It's just that 5.110.20.pXX will not have done anything
-		   useful */
-
-		priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
-		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				    priv->curbssparams.channel)) {
-			priv->mesh_tlv = TLV_TYPE_MESH_ID;
-			if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-					    priv->curbssparams.channel))
-				priv->mesh_tlv = 0;
-		}
-	} else if (priv->mesh_fw_ver == MESH_FW_NEW) {
-		/* 10.0.0.pXX new firmwares should succeed with TLV
-		 * 0x100+37; Do not invoke command with old TLV.
-		 */
-		priv->mesh_tlv = TLV_TYPE_MESH_ID;
-		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				    priv->curbssparams.channel))
-			priv->mesh_tlv = 0;
-	}
-	if (priv->mesh_tlv) {
-		lbs_add_mesh(priv);
-
-		if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
-			lbs_pr_err("cannot register lbs_mesh attribute\n");
-
-		/* While rtap isn't related to mesh, only mesh-enabled
-		 * firmware implements the rtap functionality via
-		 * CMD_802_11_MONITOR_MODE.
-		 */
+	if (lbs_init_mesh(priv)) {
 		if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
 			lbs_pr_err("cannot register lbs_rtap attribute\n");
 	}
@@ -1385,13 +1133,12 @@ void lbs_stop_card(struct lbs_private *priv)
 	netif_carrier_off(dev);
 
 	lbs_debugfs_remove_one(priv);
-	if (priv->mesh_tlv) {
-		device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
+	if (lbs_deinit_mesh(priv))
 		device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
-	}
 
 	/* Delete the timeout of the currently processing command */
 	del_timer_sync(&priv->command_timer);
+	del_timer_sync(&priv->auto_deepsleep_timer);
 
 	/* Flush pending command nodes */
 	spin_lock_irqsave(&priv->driver_lock, flags);
@@ -1420,157 +1167,6 @@ out:
 EXPORT_SYMBOL_GPL(lbs_stop_card);
 
 
-static const struct net_device_ops mesh_netdev_ops = {
-	.ndo_open		= lbs_dev_open,
-	.ndo_stop 		= lbs_mesh_stop,
-	.ndo_start_xmit		= lbs_hard_start_xmit,
-	.ndo_set_mac_address	= lbs_set_mac_address,
-	.ndo_set_multicast_list = lbs_set_multicast_list,
-};
-
-/**
- * @brief This function adds mshX interface
- *
- *  @param priv    A pointer to the struct lbs_private structure
- *  @return 	   0 if successful, -X otherwise
- */
-static int lbs_add_mesh(struct lbs_private *priv)
-{
-	struct net_device *mesh_dev = NULL;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_MESH);
-
-	/* Allocate a virtual mesh device */
-	if (!(mesh_dev = alloc_netdev(0, "msh%d", ether_setup))) {
-		lbs_deb_mesh("init mshX device failed\n");
-		ret = -ENOMEM;
-		goto done;
-	}
-	mesh_dev->ml_priv = priv;
-	priv->mesh_dev = mesh_dev;
-
-	mesh_dev->netdev_ops = &mesh_netdev_ops;
-	mesh_dev->ethtool_ops = &lbs_ethtool_ops;
-	memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
-			sizeof(priv->dev->dev_addr));
-
-	SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
-
-#ifdef	WIRELESS_EXT
-	mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def;
-#endif
-	mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
-	/* Register virtual mesh interface */
-	ret = register_netdev(mesh_dev);
-	if (ret) {
-		lbs_pr_err("cannot register mshX virtual interface\n");
-		goto err_free;
-	}
-
-	ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
-	if (ret)
-		goto err_unregister;
-
-	lbs_persist_config_init(mesh_dev);
-
-	/* Everything successful */
-	ret = 0;
-	goto done;
-
-err_unregister:
-	unregister_netdev(mesh_dev);
-
-err_free:
-	free_netdev(mesh_dev);
-
-done:
-	lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
-	return ret;
-}
-
-static void lbs_remove_mesh(struct lbs_private *priv)
-{
-	struct net_device *mesh_dev;
-
-
-	mesh_dev = priv->mesh_dev;
-	if (!mesh_dev)
-		return;
-
-	lbs_deb_enter(LBS_DEB_MESH);
-	netif_stop_queue(mesh_dev);
-	netif_carrier_off(mesh_dev);
-	sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
-	lbs_persist_config_remove(mesh_dev);
-	unregister_netdev(mesh_dev);
-	priv->mesh_dev = NULL;
-	free_netdev(mesh_dev);
-	lbs_deb_leave(LBS_DEB_MESH);
-}
-
-/**
- *  @brief This function finds the CFP in
- *  region_cfp_table based on region and band parameter.
- *
- *  @param region  The region code
- *  @param band	   The band
- *  @param cfp_no  A pointer to CFP number
- *  @return 	   A pointer to CFP
- */
-struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
-{
-	int i, end;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	end = ARRAY_SIZE(region_cfp_table);
-
-	for (i = 0; i < end ; i++) {
-		lbs_deb_main("region_cfp_table[i].region=%d\n",
-			region_cfp_table[i].region);
-		if (region_cfp_table[i].region == region) {
-			*cfp_no = region_cfp_table[i].cfp_no_BG;
-			lbs_deb_leave(LBS_DEB_MAIN);
-			return region_cfp_table[i].cfp_BG;
-		}
-	}
-
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
-	return NULL;
-}
-
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
-{
-	int ret = 0;
-	int i = 0;
-
-	struct chan_freq_power *cfp;
-	int cfp_no;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	memset(priv->region_channel, 0, sizeof(priv->region_channel));
-
-	cfp = lbs_get_region_cfp_table(region, &cfp_no);
-	if (cfp != NULL) {
-		priv->region_channel[i].nrcfp = cfp_no;
-		priv->region_channel[i].CFP = cfp;
-	} else {
-		lbs_deb_main("wrong region code %#x in band B/G\n",
-		       region);
-		ret = -1;
-		goto out;
-	}
-	priv->region_channel[i].valid = 1;
-	priv->region_channel[i].region = region;
-	priv->region_channel[i].band = band;
-	i++;
-out:
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-
 void lbs_queue_event(struct lbs_private *priv, u32 event)
 {
 	unsigned long flags;
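The exported lbs_enter_auto_deep_sleep()/lbs_exit_auto_deep_sleep() helpers above are meant to be driven by a configuration path that is not part of this hunk (a wext/ioctl handler elsewhere in the patch). A sketch of the expected call sequence; example_set_deep_sleep() and its millisecond timeout argument are assumptions, not taken from the patch:

/* Sketch only: function name and 'timeout_ms' parameter are assumed. */
static int example_set_deep_sleep(struct lbs_private *priv, int timeout_ms)
{
	if (timeout_ms > 0) {
		/* auto_deepsleep_timer_fn() interprets this value in milliseconds */
		priv->auto_deep_sleep_timeout = timeout_ms;
		return lbs_enter_auto_deep_sleep(priv);	/* arms auto_deepsleep_timer */
	}
	return lbs_exit_auto_deep_sleep(priv);	/* disarms the timer, clears the timeout */
}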
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
new file mode 100644
index 000000000000..2f91c9b808af
--- /dev/null
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -0,0 +1,1141 @@
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/kthread.h>
+#include <linux/kfifo.h>
+
+#include "mesh.h"
+#include "decl.h"
+#include "cmd.h"
+
+
+/***************************************************************************
+ * Mesh sysfs support
+ */
+
+/**
+ * Attributes exported through sysfs
+ */
+
+/**
+ * @brief Get function for sysfs attribute anycast_mask
+ */
+static ssize_t lbs_anycast_get(struct device *dev,
+		struct device_attribute *attr, char * buf)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_access mesh_access;
+	int ret;
+
+	memset(&mesh_access, 0, sizeof(mesh_access));
+
+	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_ANYCAST, &mesh_access);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0]));
+}
+
+/**
+ * @brief Set function for sysfs attribute anycast_mask
+ */
+static ssize_t lbs_anycast_set(struct device *dev,
+		struct device_attribute *attr, const char * buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_access mesh_access;
+	uint32_t datum;
+	int ret;
+
+	memset(&mesh_access, 0, sizeof(mesh_access));
+	sscanf(buf, "%x", &datum);
+	mesh_access.data[0] = cpu_to_le32(datum);
+
+	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_ANYCAST, &mesh_access);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute prb_rsp_limit
+ */
+static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_access mesh_access;
+	int ret;
+	u32 retry_limit;
+
+	memset(&mesh_access, 0, sizeof(mesh_access));
+	mesh_access.data[0] = cpu_to_le32(CMD_ACT_GET);
+
+	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
+			&mesh_access);
+	if (ret)
+		return ret;
+
+	retry_limit = le32_to_cpu(mesh_access.data[1]);
+	return snprintf(buf, 10, "%d\n", retry_limit);
+}
+
+/**
+ * @brief Set function for sysfs attribute prb_rsp_limit
+ */
+static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_access mesh_access;
+	int ret;
+	unsigned long retry_limit;
+
+	memset(&mesh_access, 0, sizeof(mesh_access));
+	mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
+
+	if (!strict_strtoul(buf, 10, &retry_limit))
+		return -ENOTSUPP;
+	if (retry_limit > 15)
+		return -ENOTSUPP;
+
+	mesh_access.data[1] = cpu_to_le32(retry_limit);
+
+	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
+			&mesh_access);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * Get function for sysfs attribute mesh
+ */
+static ssize_t lbs_mesh_get(struct device *dev,
+		struct device_attribute *attr, char * buf)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
+}
+
+/**
+ *  Set function for sysfs attribute mesh
+ */
+static ssize_t lbs_mesh_set(struct device *dev,
+		struct device_attribute *attr, const char * buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	int enable;
+	int ret, action = CMD_ACT_MESH_CONFIG_STOP;
+
+	sscanf(buf, "%x", &enable);
+	enable = !!enable;
+	if (enable == !!priv->mesh_dev)
+		return count;
+	if (enable)
+		action = CMD_ACT_MESH_CONFIG_START;
+	ret = lbs_mesh_config(priv, action, priv->channel);
+	if (ret)
+		return ret;
+
+	if (enable)
+		lbs_add_mesh(priv);
+	else
+		lbs_remove_mesh(priv);
+
+	return count;
+}
+
+/**
+ * lbs_mesh attribute to be exported per ethX interface
+ * through sysfs (/sys/class/net/ethX/lbs_mesh)
+ */
+static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set);
+
+/**
+ * anycast_mask attribute to be exported per mshX interface
+ * through sysfs (/sys/class/net/mshX/anycast_mask)
+ */
+static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set);
+
+/**
+ * prb_rsp_limit attribute to be exported per mshX interface
+ * through sysfs (/sys/class/net/mshX/prb_rsp_limit)
+ */
+static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get,
+		lbs_prb_rsp_limit_set);
+
+static struct attribute *lbs_mesh_sysfs_entries[] = {
+	&dev_attr_anycast_mask.attr,
+	&dev_attr_prb_rsp_limit.attr,
+	NULL,
+};
+
+static struct attribute_group lbs_mesh_attr_group = {
+	.attrs = lbs_mesh_sysfs_entries,
+};
+
+
+
+/***************************************************************************
+ * Initializing, starting and stopping mesh
+ */
+
+/*
+ * Check mesh FW version and appropriately send the mesh start
+ * command
+ */
+int lbs_init_mesh(struct lbs_private *priv)
+{
+	struct net_device *dev = priv->dev;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_MESH);
+
+	if (priv->mesh_fw_ver == MESH_FW_OLD) {
+		/* Enable mesh, if supported, and work out which TLV it uses.
+		   0x100 + 291 is an unofficial value used in 5.110.20.pXX
+		   0x100 + 37 is the official value used in 5.110.21.pXX
+		   but we check them in that order because 20.pXX doesn't
+		   give an error -- it just silently fails. */
+
+		/* 5.110.20.pXX firmware will fail the command if the channel
+		   doesn't match the existing channel. But only if the TLV
+		   is correct. If the channel is wrong, _BOTH_ versions will
+		   give an error to 0x100+291, and allow 0x100+37 to succeed.
+		   It's just that 5.110.20.pXX will not have done anything
+		   useful */
+
+		priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
+		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+				    priv->channel)) {
+			priv->mesh_tlv = TLV_TYPE_MESH_ID;
+			if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+					    priv->channel))
+				priv->mesh_tlv = 0;
+		}
+	} else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+		/* 10.0.0.pXX new firmwares should succeed with TLV
+		 * 0x100+37; Do not invoke command with old TLV.
+		 */
+		priv->mesh_tlv = TLV_TYPE_MESH_ID;
+		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+				    priv->channel))
+			priv->mesh_tlv = 0;
+	}
+	if (priv->mesh_tlv) {
+		lbs_add_mesh(priv);
+
+		if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
+			lbs_pr_err("cannot register lbs_mesh attribute\n");
+
+		ret = 1;
+	}
+
+	lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
+	return ret;
+}
+
+
+int lbs_deinit_mesh(struct lbs_private *priv)
+{
+	struct net_device *dev = priv->dev;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_MESH);
+
+	if (priv->mesh_tlv) {
+		device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
+		ret = 1;
+	}
+
+	lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
+	return ret;
+}
+
+
+/**
+ *  @brief This function closes the mshX interface
+ *
+ *  @param dev     A pointer to net_device structure
+ *  @return 	   0
+ */
+static int lbs_mesh_stop(struct net_device *dev)
+{
+	struct lbs_private *priv = dev->ml_priv;
+
+	lbs_deb_enter(LBS_DEB_MESH);
+	spin_lock_irq(&priv->driver_lock);
+
+	priv->mesh_open = 0;
+	priv->mesh_connect_status = LBS_DISCONNECTED;
+
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
+
+	spin_unlock_irq(&priv->driver_lock);
+
+	schedule_work(&priv->mcast_work);
+
+	lbs_deb_leave(LBS_DEB_MESH);
+	return 0;
+}
+
+/**
+ *  @brief This function opens the mshX interface
+ *
+ *  @param dev     A pointer to net_device structure
+ *  @return 	   0 or -EBUSY if monitor mode active
+ */
+static int lbs_mesh_dev_open(struct net_device *dev)
+{
+	struct lbs_private *priv = dev->ml_priv;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_NET);
+
+	spin_lock_irq(&priv->driver_lock);
+
+	if (priv->monitormode) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	priv->mesh_open = 1;
+	priv->mesh_connect_status = LBS_CONNECTED;
+	netif_carrier_on(dev);
+
+	if (!priv->tx_pending_len)
+		netif_wake_queue(dev);
+ out:
+
+	spin_unlock_irq(&priv->driver_lock);
+	lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
+	return ret;
+}
+
+static const struct net_device_ops mesh_netdev_ops = {
+	.ndo_open		= lbs_mesh_dev_open,
+	.ndo_stop 		= lbs_mesh_stop,
+	.ndo_start_xmit		= lbs_hard_start_xmit,
+	.ndo_set_mac_address	= lbs_set_mac_address,
+	.ndo_set_multicast_list = lbs_set_multicast_list,
+};
+
+/**
+ * @brief This function adds mshX interface
+ *
+ *  @param priv    A pointer to the struct lbs_private structure
+ *  @return 	   0 if successful, -X otherwise
+ */
+int lbs_add_mesh(struct lbs_private *priv)
+{
+	struct net_device *mesh_dev = NULL;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_MESH);
+
+	/* Allocate a virtual mesh device */
+	mesh_dev = alloc_netdev(0, "msh%d", ether_setup);
+	if (!mesh_dev) {
+		lbs_deb_mesh("init mshX device failed\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+	mesh_dev->ml_priv = priv;
+	priv->mesh_dev = mesh_dev;
+
+	mesh_dev->netdev_ops = &mesh_netdev_ops;
+	mesh_dev->ethtool_ops = &lbs_ethtool_ops;
+	memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
+			sizeof(priv->dev->dev_addr));
+
+	SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
+
+#ifdef	WIRELESS_EXT
+	mesh_dev->wireless_handlers = &mesh_handler_def;
+#endif
+	mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+	/* Register virtual mesh interface */
+	ret = register_netdev(mesh_dev);
+	if (ret) {
+		lbs_pr_err("cannot register mshX virtual interface\n");
+		goto err_free;
+	}
+
+	ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
+	if (ret)
+		goto err_unregister;
+
+	lbs_persist_config_init(mesh_dev);
+
+	/* Everything successful */
+	ret = 0;
+	goto done;
+
+err_unregister:
+	unregister_netdev(mesh_dev);
+
+err_free:
+	free_netdev(mesh_dev);
+
+done:
+	lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
+	return ret;
+}
+
+void lbs_remove_mesh(struct lbs_private *priv)
+{
+	struct net_device *mesh_dev;
+
+	mesh_dev = priv->mesh_dev;
+	if (!mesh_dev)
+		return;
+
+	lbs_deb_enter(LBS_DEB_MESH);
+	netif_stop_queue(mesh_dev);
+	netif_carrier_off(mesh_dev);
+	sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
+	lbs_persist_config_remove(mesh_dev);
+	unregister_netdev(mesh_dev);
+	priv->mesh_dev = NULL;
+	free_netdev(mesh_dev);
+	lbs_deb_leave(LBS_DEB_MESH);
+}
+
+
+
+/***************************************************************************
+ * Sending and receiving
+ */
+struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
+	struct net_device *dev, struct rxpd *rxpd)
+{
+	if (priv->mesh_dev) {
+		if (priv->mesh_fw_ver == MESH_FW_OLD) {
+			if (rxpd->rx_control & RxPD_MESH_FRAME)
+				dev = priv->mesh_dev;
+		} else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+			if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
+				dev = priv->mesh_dev;
+		}
+	}
+	return dev;
+}
+
+
+void lbs_mesh_set_txpd(struct lbs_private *priv,
+	struct net_device *dev, struct txpd *txpd)
+{
+	if (dev == priv->mesh_dev) {
+		if (priv->mesh_fw_ver == MESH_FW_OLD)
+			txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
+		else if (priv->mesh_fw_ver == MESH_FW_NEW)
+			txpd->u.bss.bss_num = MESH_IFACE_ID;
+	}
+}
+
+
+/***************************************************************************
+ * Mesh command handling
+ */
+
+int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
+			       u16 cmd_action, void *pdata_buf)
+{
+	struct cmd_ds_bt_access *bt_access = &cmd->params.bt;
+	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
+
+	cmd->command = cpu_to_le16(CMD_BT_ACCESS);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) +
+		sizeof(struct cmd_header));
+	cmd->result = 0;
+	bt_access->action = cpu_to_le16(cmd_action);
+
+	switch (cmd_action) {
+	case CMD_ACT_BT_ACCESS_ADD:
+		memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN);
+		lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr",
+			bt_access->addr1, 6);
+		break;
+	case CMD_ACT_BT_ACCESS_DEL:
+		memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN);
+		lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr",
+			bt_access->addr1, 6);
+		break;
+	case CMD_ACT_BT_ACCESS_LIST:
+		bt_access->id = cpu_to_le32(*(u32 *) pdata_buf);
+		break;
+	case CMD_ACT_BT_ACCESS_RESET:
+		break;
+	case CMD_ACT_BT_ACCESS_SET_INVERT:
+		bt_access->id = cpu_to_le32(*(u32 *) pdata_buf);
+		break;
+	case CMD_ACT_BT_ACCESS_GET_INVERT:
+		break;
+	default:
+		break;
+	}
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
+			       u16 cmd_action, void *pdata_buf)
+{
+	struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt;
+	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
+
+	cmd->command = cpu_to_le16(CMD_FWT_ACCESS);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) +
+		sizeof(struct cmd_header));
+	cmd->result = 0;
+
+	if (pdata_buf)
+		memcpy(fwt_access, pdata_buf, sizeof(*fwt_access));
+	else
+		memset(fwt_access, 0, sizeof(*fwt_access));
+
+	fwt_access->action = cpu_to_le16(cmd_action);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+		    struct cmd_ds_mesh_access *cmd)
+{
+	int ret;
+
+	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
+
+	cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS);
+	cmd->hdr.size = cpu_to_le16(sizeof(*cmd));
+	cmd->hdr.result = 0;
+
+	cmd->action = cpu_to_le16(cmd_action);
+
+	ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return ret;
+}
+
+static int __lbs_mesh_config_send(struct lbs_private *priv,
+				  struct cmd_ds_mesh_config *cmd,
+				  uint16_t action, uint16_t type)
+{
+	int ret;
+	u16 command = CMD_MESH_CONFIG_OLD;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	/*
+	 * Command id is 0xac for v10 FW along with mesh interface
+	 * id in bits 14-13-12.
+	 */
+	if (priv->mesh_fw_ver == MESH_FW_NEW)
+		command = CMD_MESH_CONFIG |
+			  (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
+
+	cmd->hdr.command = cpu_to_le16(command);
+	cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
+	cmd->hdr.result = 0;
+
+	cmd->type = cpu_to_le16(type);
+	cmd->action = cpu_to_le16(action);
+
+	ret = lbs_cmd_with_response(priv, command, cmd);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return ret;
+}
+
+int lbs_mesh_config_send(struct lbs_private *priv,
+			 struct cmd_ds_mesh_config *cmd,
+			 uint16_t action, uint16_t type)
+{
+	int ret;
+
+	if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
+		return -EOPNOTSUPP;
+
+	ret = __lbs_mesh_config_send(priv, cmd, action, type);
+	return ret;
+}
+
+/* This function is the CMD_MESH_CONFIG legacy function.  It only handles the
+ * START and STOP actions.  The extended actions supported by CMD_MESH_CONFIG
+ * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
+ * lbs_mesh_config_send.
+ */
+int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
+{
+	struct cmd_ds_mesh_config cmd;
+	struct mrvl_meshie *ie;
+	DECLARE_SSID_BUF(ssid);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.channel = cpu_to_le16(chan);
+	ie = (struct mrvl_meshie *)cmd.data;
+
+	switch (action) {
+	case CMD_ACT_MESH_CONFIG_START:
+		ie->id = WLAN_EID_GENERIC;
+		ie->val.oui[0] = 0x00;
+		ie->val.oui[1] = 0x50;
+		ie->val.oui[2] = 0x43;
+		ie->val.type = MARVELL_MESH_IE_TYPE;
+		ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
+		ie->val.version = MARVELL_MESH_IE_VERSION;
+		ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
+		ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
+		ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
+		ie->val.mesh_id_len = priv->mesh_ssid_len;
+		memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
+		ie->len = sizeof(struct mrvl_meshie_val) -
+			IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
+		cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
+		break;
+	case CMD_ACT_MESH_CONFIG_STOP:
+		break;
+	default:
+		return -1;
+	}
+	lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
+		    action, priv->mesh_tlv, chan,
+		    print_ssid(ssid, priv->mesh_ssid, priv->mesh_ssid_len));
+
+	return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
+}
+
+
+
+/***************************************************************************
+ * Persistent configuration support
+ */
+
+static int mesh_get_default_parameters(struct device *dev,
+				       struct mrvl_mesh_defaults *defs)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_config cmd;
+	int ret;
+
+	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET,
+				   CMD_TYPE_MESH_GET_DEFAULTS);
+
+	if (ret)
+		return -EOPNOTSUPP;
+
+	memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults));
+
+	return 0;
+}
+
+/**
+ * @brief Get function for sysfs attribute bootflag
+ */
+static ssize_t bootflag_get(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag));
+}
+
+/**
+ * @brief Set function for sysfs attribute bootflag
+ */
+static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_config cmd;
+	uint32_t datum;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ret = sscanf(buf, "%d", &datum);
+	if ((ret != 1) || (datum > 1))
+		return -EINVAL;
+
+	*((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
+	cmd.length = cpu_to_le16(sizeof(uint32_t));
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_BOOTFLAG);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute boottime
+ */
+static ssize_t boottime_get(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 12, "%d\n", defs.boottime);
+}
+
+/**
+ * @brief Set function for sysfs attribute boottime
+ */
+static ssize_t boottime_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_config cmd;
+	uint32_t datum;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ret = sscanf(buf, "%d", &datum);
+	if ((ret != 1) || (datum > 255))
+		return -EINVAL;
+
+	/* A too small boot time will result in the device booting into
+	 * standalone (no-host) mode before the host can take control of it,
+	 * so the change will be hard to revert.  This may be a desired
+	 * feature (e.g to configure a very fast boot time for devices that
+	 * will not be attached to a host), but dangerous.  So I'm enforcing a
+	 * lower limit of 20 seconds:  remove and recompile the driver if this
+	 * does not work for you.
+	 */
+	datum = (datum < 20) ? 20 : datum;
+	cmd.data[0] = datum;
+	cmd.length = cpu_to_le16(sizeof(uint8_t));
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_BOOTTIME);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute channel
+ */
+static ssize_t channel_get(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel));
+}
+
+/**
+ * @brief Set function for sysfs attribute channel
+ */
+static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	struct cmd_ds_mesh_config cmd;
+	uint32_t datum;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ret = sscanf(buf, "%d", &datum);
+	if (ret != 1 || datum < 1 || datum > 11)
+		return -EINVAL;
+
+	*((__le16 *)&cmd.data[0]) = cpu_to_le16(datum);
+	cmd.length = cpu_to_le16(sizeof(uint16_t));
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_DEF_CHANNEL);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute mesh_id
+ */
+static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int maxlen;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
+		lbs_pr_err("inconsistent mesh ID length");
+		defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
+	}
+
+	/* SSID not null terminated: reserve room for \0 + \n */
+	maxlen = defs.meshie.val.mesh_id_len + 2;
+	maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
+
+	defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
+
+	return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
+}
+
+/**
+ * @brief Set function for sysfs attribute mesh_id
+ */
+static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct cmd_ds_mesh_config cmd;
+	struct mrvl_mesh_defaults defs;
+	struct mrvl_meshie *ie;
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	int len;
+	int ret;
+
+	if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
+	ie = (struct mrvl_meshie *) &cmd.data[0];
+
+	/* fetch all other Information Element parameters */
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
+
+	/* transfer IE elements */
+	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
+
+	len = count - 1;
+	memcpy(ie->val.mesh_id, buf, len);
+	/* SSID len */
+	ie->val.mesh_id_len = len;
+	/* IE len */
+	ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len;
+
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_MESH_IE);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute protocol_id
+ */
+static ssize_t protocol_id_get(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id);
+}
+
+/**
+ * @brief Set function for sysfs attribute protocol_id
+ */
+static ssize_t protocol_id_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct cmd_ds_mesh_config cmd;
+	struct mrvl_mesh_defaults defs;
+	struct mrvl_meshie *ie;
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	uint32_t datum;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ret = sscanf(buf, "%d", &datum);
+	if ((ret != 1) || (datum > 255))
+		return -EINVAL;
+
+	/* fetch all other Information Element parameters */
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
+
+	/* transfer IE elements */
+	ie = (struct mrvl_meshie *) &cmd.data[0];
+	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
+	/* update protocol id */
+	ie->val.active_protocol_id = datum;
+
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_MESH_IE);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute metric_id
+ */
+static ssize_t metric_id_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id);
+}
+
+/**
+ * @brief Set function for sysfs attribute metric_id
+ */
+static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct cmd_ds_mesh_config cmd;
+	struct mrvl_mesh_defaults defs;
+	struct mrvl_meshie *ie;
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	uint32_t datum;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ret = sscanf(buf, "%d", &datum);
+	if ((ret != 1) || (datum > 255))
+		return -EINVAL;
+
+	/* fetch all other Information Element parameters */
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
+
+	/* transfer IE elements */
+	ie = (struct mrvl_meshie *) &cmd.data[0];
+	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
+	/* update metric id */
+	ie->val.active_metric_id = datum;
+
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_MESH_IE);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+/**
+ * @brief Get function for sysfs attribute capability
+ */
+static ssize_t capability_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mrvl_mesh_defaults defs;
+	int ret;
+
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability);
+}
+
+/**
+ * @brief Set function for sysfs attribute capability
+ */
+static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct cmd_ds_mesh_config cmd;
+	struct mrvl_mesh_defaults defs;
+	struct mrvl_meshie *ie;
+	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
+	uint32_t datum;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ret = sscanf(buf, "%d", &datum);
+	if ((ret != 1) || (datum > 255))
+		return -EINVAL;
+
+	/* fetch all other Information Element parameters */
+	ret = mesh_get_default_parameters(dev, &defs);
+
+	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
+
+	/* transfer IE elements */
+	ie = (struct mrvl_meshie *) &cmd.data[0];
+	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
+	/* update value */
+	ie->val.mesh_capability = datum;
+
+	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
+				   CMD_TYPE_MESH_SET_MESH_IE);
+	if (ret)
+		return ret;
+
+	return strlen(buf);
+}
+
+
+static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set);
+static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set);
+static DEVICE_ATTR(channel, 0644, channel_get, channel_set);
+static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set);
+static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set);
+static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set);
+static DEVICE_ATTR(capability, 0644, capability_get, capability_set);
+
+static struct attribute *boot_opts_attrs[] = {
+	&dev_attr_bootflag.attr,
+	&dev_attr_boottime.attr,
+	&dev_attr_channel.attr,
+	NULL
+};
+
+static struct attribute_group boot_opts_group = {
+	.name = "boot_options",
+	.attrs = boot_opts_attrs,
+};
+
+static struct attribute *mesh_ie_attrs[] = {
+	&dev_attr_mesh_id.attr,
+	&dev_attr_protocol_id.attr,
+	&dev_attr_metric_id.attr,
+	&dev_attr_capability.attr,
+	NULL
+};
+
+static struct attribute_group mesh_ie_group = {
+	.name = "mesh_ie",
+	.attrs = mesh_ie_attrs,
+};
+
+void lbs_persist_config_init(struct net_device *dev)
+{
+	int ret;
+	ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
+	ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
+}
+
+void lbs_persist_config_remove(struct net_device *dev)
+{
+	sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
+	sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
+}
+
+
+
+/***************************************************************************
+ * Ethtool related
+ */
+
+static const char *mesh_stat_strings[] = {
+			"drop_duplicate_bcast",
+			"drop_ttl_zero",
+			"drop_no_fwd_route",
+			"drop_no_buffers",
+			"fwded_unicast_cnt",
+			"fwded_bcast_cnt",
+			"drop_blind_table",
+			"tx_failed_cnt"
+};
+
+void lbs_mesh_ethtool_get_stats(struct net_device *dev,
+	struct ethtool_stats *stats, uint64_t *data)
+{
+	struct lbs_private *priv = dev->ml_priv;
+	struct cmd_ds_mesh_access mesh_access;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_ETHTOOL);
+
+	/* Get Mesh Statistics */
+	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
+
+	if (ret) {
+		memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t)));
+		return;
+	}
+
+	priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]);
+	priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]);
+	priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]);
+	priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]);
+	priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]);
+	priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]);
+	priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]);
+	priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]);
+
+	data[0] = priv->mstats.fwd_drop_rbt;
+	data[1] = priv->mstats.fwd_drop_ttl;
+	data[2] = priv->mstats.fwd_drop_noroute;
+	data[3] = priv->mstats.fwd_drop_nobuf;
+	data[4] = priv->mstats.fwd_unicast_cnt;
+	data[5] = priv->mstats.fwd_bcast_cnt;
+	data[6] = priv->mstats.drop_blind;
+	data[7] = priv->mstats.tx_failed_cnt;
+
+	lbs_deb_leave(LBS_DEB_ETHTOOL);
+}
+
+int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+	struct lbs_private *priv = dev->ml_priv;
+
+	if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
+		return MESH_STATS_NUM;
+
+	return -EOPNOTSUPP;
+}
+
+void lbs_mesh_ethtool_get_strings(struct net_device *dev,
+	uint32_t stringset, uint8_t *s)
+{
+	int i;
+
+	lbs_deb_enter(LBS_DEB_ETHTOOL);
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < MESH_STATS_NUM; i++) {
+			memcpy(s + i * ETH_GSTRING_LEN,
+					mesh_stat_strings[i],
+					ETH_GSTRING_LEN);
+		}
+		break;
+	}
+	lbs_deb_leave(LBS_DEB_ETHTOOL);
+}
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
new file mode 100644
index 000000000000..fea9b5d005fc
--- /dev/null
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -0,0 +1,78 @@
+/**
+ * Contains all definitions needed for the Libertas mesh implementation.
+ */
+#ifndef _LBS_MESH_H_
+#define _LBS_MESH_H_
+
+
+#include <net/iw_handler.h>
+#include <net/lib80211.h>
+
+
+/* Mesh statistics */
+struct lbs_mesh_stats {
+	u32	fwd_bcast_cnt;		/* Fwd: Broadcast counter */
+	u32	fwd_unicast_cnt;	/* Fwd: Unicast counter */
+	u32	fwd_drop_ttl;		/* Fwd: TTL zero */
+	u32	fwd_drop_rbt;		/* Fwd: Recently Broadcasted */
+	u32	fwd_drop_noroute; 	/* Fwd: No route to Destination */
+	u32	fwd_drop_nobuf;		/* Fwd: Run out of internal buffers */
+	u32	drop_blind;		/* Rx:  Dropped by blinding table */
+	u32	tx_failed_cnt;		/* Tx:  Failed transmissions */
+};
+
+
+struct net_device;
+struct lbs_private;
+
+int lbs_init_mesh(struct lbs_private *priv);
+int lbs_deinit_mesh(struct lbs_private *priv);
+
+int lbs_add_mesh(struct lbs_private *priv);
+void lbs_remove_mesh(struct lbs_private *priv);
+
+
+/* Sending / Receiving */
+
+struct rxpd;
+struct txpd;
+
+struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
+	struct net_device *dev, struct rxpd *rxpd);
+void lbs_mesh_set_txpd(struct lbs_private *priv,
+	struct net_device *dev, struct txpd *txpd);
+
+
+/* Command handling */
+
+struct cmd_ds_command;
+
+int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
+	u16 cmd_action, void *pdata_buf);
+int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
+	u16 cmd_action, void *pdata_buf);
+
+
+/* Persistent configuration */
+
+void lbs_persist_config_init(struct net_device *net);
+void lbs_persist_config_remove(struct net_device *net);
+
+
+/* WEXT handler */
+
+extern struct iw_handler_def mesh_handler_def;
+
+
+/* Ethtool statistics */
+
+struct ethtool_stats;
+
+void lbs_mesh_ethtool_get_stats(struct net_device *dev,
+	struct ethtool_stats *stats, uint64_t *data);
+int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset);
+void lbs_mesh_ethtool_get_strings(struct net_device *dev,
+	uint32_t stringset, uint8_t *s);
+
+
+#endif
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
deleted file mode 100644
index 18fe29faf99b..000000000000
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ /dev/null
@@ -1,453 +0,0 @@
-#include <linux/moduleparam.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/kthread.h>
-#include <linux/kfifo.h>
-
-#include "host.h"
-#include "decl.h"
-#include "dev.h"
-#include "wext.h"
-#include "debugfs.h"
-#include "scan.h"
-#include "assoc.h"
-#include "cmd.h"
-
-static int mesh_get_default_parameters(struct device *dev,
-				       struct mrvl_mesh_defaults *defs)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_config cmd;
-	int ret;
-
-	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET,
-				   CMD_TYPE_MESH_GET_DEFAULTS);
-
-	if (ret)
-		return -EOPNOTSUPP;
-
-	memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults));
-
-	return 0;
-}
-
-/**
- * @brief Get function for sysfs attribute bootflag
- */
-static ssize_t bootflag_get(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag));
-}
-
-/**
- * @brief Set function for sysfs attribute bootflag
- */
-static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_config cmd;
-	uint32_t datum;
-	int ret;
-
-	memset(&cmd, 0, sizeof(cmd));
-	ret = sscanf(buf, "%d", &datum);
-	if ((ret != 1) || (datum > 1))
-		return -EINVAL;
-
-	*((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
-	cmd.length = cpu_to_le16(sizeof(uint32_t));
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_BOOTFLAG);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute boottime
- */
-static ssize_t boottime_get(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 12, "%d\n", defs.boottime);
-}
-
-/**
- * @brief Set function for sysfs attribute boottime
- */
-static ssize_t boottime_set(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_config cmd;
-	uint32_t datum;
-	int ret;
-
-	memset(&cmd, 0, sizeof(cmd));
-	ret = sscanf(buf, "%d", &datum);
-	if ((ret != 1) || (datum > 255))
-		return -EINVAL;
-
-	/* A too small boot time will result in the device booting into
-	 * standalone (no-host) mode before the host can take control of it,
-	 * so the change will be hard to revert.  This may be a desired
-	 * feature (e.g to configure a very fast boot time for devices that
-	 * will not be attached to a host), but dangerous.  So I'm enforcing a
-	 * lower limit of 20 seconds:  remove and recompile the driver if this
-	 * does not work for you.
-	 */
-	datum = (datum < 20) ? 20 : datum;
-	cmd.data[0] = datum;
-	cmd.length = cpu_to_le16(sizeof(uint8_t));
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_BOOTTIME);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute channel
- */
-static ssize_t channel_get(struct device *dev,
-			   struct device_attribute *attr, char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel));
-}
-
-/**
- * @brief Set function for sysfs attribute channel
- */
-static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct cmd_ds_mesh_config cmd;
-	uint32_t datum;
-	int ret;
-
-	memset(&cmd, 0, sizeof(cmd));
-	ret = sscanf(buf, "%d", &datum);
-	if (ret != 1 || datum < 1 || datum > 11)
-		return -EINVAL;
-
-	*((__le16 *)&cmd.data[0]) = cpu_to_le16(datum);
-	cmd.length = cpu_to_le16(sizeof(uint16_t));
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_DEF_CHANNEL);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute mesh_id
- */
-static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int maxlen;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) {
-		lbs_pr_err("inconsistent mesh ID length");
-		defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE;
-	}
-
-	/* SSID not null terminated: reserve room for \0 + \n */
-	maxlen = defs.meshie.val.mesh_id_len + 2;
-	maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
-
-	defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
-
-	return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
-}
-
-/**
- * @brief Set function for sysfs attribute mesh_id
- */
-static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	struct cmd_ds_mesh_config cmd;
-	struct mrvl_mesh_defaults defs;
-	struct mrvl_meshie *ie;
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	int len;
-	int ret;
-
-	if (count < 2 || count > IW_ESSID_MAX_SIZE + 1)
-		return -EINVAL;
-
-	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
-	ie = (struct mrvl_meshie *) &cmd.data[0];
-
-	/* fetch all other Information Element parameters */
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
-
-	/* transfer IE elements */
-	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
-
-	len = count - 1;
-	memcpy(ie->val.mesh_id, buf, len);
-	/* SSID len */
-	ie->val.mesh_id_len = len;
-	/* IE len */
-	ie->len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len;
-
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_MESH_IE);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute protocol_id
- */
-static ssize_t protocol_id_get(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id);
-}
-
-/**
- * @brief Set function for sysfs attribute protocol_id
- */
-static ssize_t protocol_id_set(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct cmd_ds_mesh_config cmd;
-	struct mrvl_mesh_defaults defs;
-	struct mrvl_meshie *ie;
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	uint32_t datum;
-	int ret;
-
-	memset(&cmd, 0, sizeof(cmd));
-	ret = sscanf(buf, "%d", &datum);
-	if ((ret != 1) || (datum > 255))
-		return -EINVAL;
-
-	/* fetch all other Information Element parameters */
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
-
-	/* transfer IE elements */
-	ie = (struct mrvl_meshie *) &cmd.data[0];
-	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
-	/* update protocol id */
-	ie->val.active_protocol_id = datum;
-
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_MESH_IE);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute metric_id
- */
-static ssize_t metric_id_get(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id);
-}
-
-/**
- * @brief Set function for sysfs attribute metric_id
- */
-static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
-			     const char *buf, size_t count)
-{
-	struct cmd_ds_mesh_config cmd;
-	struct mrvl_mesh_defaults defs;
-	struct mrvl_meshie *ie;
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	uint32_t datum;
-	int ret;
-
-	memset(&cmd, 0, sizeof(cmd));
-	ret = sscanf(buf, "%d", &datum);
-	if ((ret != 1) || (datum > 255))
-		return -EINVAL;
-
-	/* fetch all other Information Element parameters */
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
-
-	/* transfer IE elements */
-	ie = (struct mrvl_meshie *) &cmd.data[0];
-	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
-	/* update metric id */
-	ie->val.active_metric_id = datum;
-
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_MESH_IE);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-/**
- * @brief Get function for sysfs attribute capability
- */
-static ssize_t capability_get(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct mrvl_mesh_defaults defs;
-	int ret;
-
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	if (ret)
-		return ret;
-
-	return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability);
-}
-
-/**
- * @brief Set function for sysfs attribute capability
- */
-static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
-			      const char *buf, size_t count)
-{
-	struct cmd_ds_mesh_config cmd;
-	struct mrvl_mesh_defaults defs;
-	struct mrvl_meshie *ie;
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	uint32_t datum;
-	int ret;
-
-	memset(&cmd, 0, sizeof(cmd));
-	ret = sscanf(buf, "%d", &datum);
-	if ((ret != 1) || (datum > 255))
-		return -EINVAL;
-
-	/* fetch all other Information Element parameters */
-	ret = mesh_get_default_parameters(dev, &defs);
-
-	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
-
-	/* transfer IE elements */
-	ie = (struct mrvl_meshie *) &cmd.data[0];
-	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
-	/* update value */
-	ie->val.mesh_capability = datum;
-
-	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
-				   CMD_TYPE_MESH_SET_MESH_IE);
-	if (ret)
-		return ret;
-
-	return strlen(buf);
-}
-
-
-static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set);
-static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set);
-static DEVICE_ATTR(channel, 0644, channel_get, channel_set);
-static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set);
-static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set);
-static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set);
-static DEVICE_ATTR(capability, 0644, capability_get, capability_set);
-
-static struct attribute *boot_opts_attrs[] = {
-	&dev_attr_bootflag.attr,
-	&dev_attr_boottime.attr,
-	&dev_attr_channel.attr,
-	NULL
-};
-
-static struct attribute_group boot_opts_group = {
-	.name = "boot_options",
-	.attrs = boot_opts_attrs,
-};
-
-static struct attribute *mesh_ie_attrs[] = {
-	&dev_attr_mesh_id.attr,
-	&dev_attr_protocol_id.attr,
-	&dev_attr_metric_id.attr,
-	&dev_attr_capability.attr,
-	NULL
-};
-
-static struct attribute_group mesh_ie_group = {
-	.name = "mesh_ie",
-	.attrs = mesh_ie_attrs,
-};
-
-void lbs_persist_config_init(struct net_device *dev)
-{
-	int ret;
-	ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
-	ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
-void lbs_persist_config_remove(struct net_device *dev)
-{
-	sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
-	sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 65f02cc6752f..2daf8ffdb7e1 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,7 +4,7 @@
 #include <linux/etherdevice.h>
 #include <linux/types.h>
 
-#include "hostcmd.h"
+#include "host.h"
 #include "radiotap.h"
 #include "decl.h"
 #include "dev.h"
@@ -160,15 +160,8 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
 	p_rx_pd = (struct rxpd *) skb->data;
 	p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
 		le32_to_cpu(p_rx_pd->pkt_ptr));
-	if (priv->mesh_dev) {
-		if (priv->mesh_fw_ver == MESH_FW_OLD) {
-			if (p_rx_pd->rx_control & RxPD_MESH_FRAME)
-				dev = priv->mesh_dev;
-		} else if (priv->mesh_fw_ver == MESH_FW_NEW) {
-			if (p_rx_pd->u.bss.bss_num == MESH_IFACE_ID)
-				dev = priv->mesh_dev;
-		}
-	}
+
+	dev = lbs_mesh_set_dev(priv, dev, p_rx_pd);
 
 	lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data,
 		 min_t(unsigned int, skb->len, 100));
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 6c95af3023cc..c6a6c042b82f 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -12,18 +12,19 @@
 #include <net/lib80211.h>
 
 #include "host.h"
-#include "decl.h"
 #include "dev.h"
 #include "scan.h"
+#include "assoc.h"
+#include "wext.h"
 #include "cmd.h"
 
 //! Approximate amount of data needed to pass a scan result back to iwlist
 #define MAX_SCAN_CELL_SIZE  (IW_EV_ADDR_LEN             \
-                             + IW_ESSID_MAX_SIZE        \
+                             + IEEE80211_MAX_SSID_LEN   \
                              + IW_EV_UINT_LEN           \
                              + IW_EV_FREQ_LEN           \
                              + IW_EV_QUAL_LEN           \
-                             + IW_ESSID_MAX_SIZE        \
+                             + IEEE80211_MAX_SSID_LEN   \
                              + IW_EV_PARAM_LEN          \
                              + 40)	/* 40 for WPAIE */
 
@@ -121,6 +122,189 @@ static inline int is_same_network(struct bss_descriptor *src,
 
 
 
+/*********************************************************************/
+/*                                                                   */
+/* Region channel support                                            */
+/*                                                                   */
+/*********************************************************************/
+
+#define LBS_TX_PWR_DEFAULT		20	/*100mW */
+#define LBS_TX_PWR_US_DEFAULT		20	/*100mW */
+#define LBS_TX_PWR_JP_DEFAULT		16	/*50mW */
+#define LBS_TX_PWR_FR_DEFAULT		20	/*100mW */
+#define LBS_TX_PWR_EMEA_DEFAULT	20	/*100mW */
+
+/* Format { channel, frequency (MHz), maxtxpower } */
+/* band: 'B/G', region: USA FCC/Canada IC */
+static struct chan_freq_power channel_freq_power_US_BG[] = {
+	{1, 2412, LBS_TX_PWR_US_DEFAULT},
+	{2, 2417, LBS_TX_PWR_US_DEFAULT},
+	{3, 2422, LBS_TX_PWR_US_DEFAULT},
+	{4, 2427, LBS_TX_PWR_US_DEFAULT},
+	{5, 2432, LBS_TX_PWR_US_DEFAULT},
+	{6, 2437, LBS_TX_PWR_US_DEFAULT},
+	{7, 2442, LBS_TX_PWR_US_DEFAULT},
+	{8, 2447, LBS_TX_PWR_US_DEFAULT},
+	{9, 2452, LBS_TX_PWR_US_DEFAULT},
+	{10, 2457, LBS_TX_PWR_US_DEFAULT},
+	{11, 2462, LBS_TX_PWR_US_DEFAULT}
+};
+
+/* band: 'B/G', region: Europe ETSI */
+static struct chan_freq_power channel_freq_power_EU_BG[] = {
+	{1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
+	{2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
+	{3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
+	{4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
+	{5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
+	{6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
+	{7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
+	{8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
+	{9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
+	{10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
+	{11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
+	{12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
+	{13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
+};
+
+/* band: 'B/G', region: Spain */
+static struct chan_freq_power channel_freq_power_SPN_BG[] = {
+	{10, 2457, LBS_TX_PWR_DEFAULT},
+	{11, 2462, LBS_TX_PWR_DEFAULT}
+};
+
+/* band: 'B/G', region: France */
+static struct chan_freq_power channel_freq_power_FR_BG[] = {
+	{10, 2457, LBS_TX_PWR_FR_DEFAULT},
+	{11, 2462, LBS_TX_PWR_FR_DEFAULT},
+	{12, 2467, LBS_TX_PWR_FR_DEFAULT},
+	{13, 2472, LBS_TX_PWR_FR_DEFAULT}
+};
+
+/* band: 'B/G', region: Japan */
+static struct chan_freq_power channel_freq_power_JPN_BG[] = {
+	{1, 2412, LBS_TX_PWR_JP_DEFAULT},
+	{2, 2417, LBS_TX_PWR_JP_DEFAULT},
+	{3, 2422, LBS_TX_PWR_JP_DEFAULT},
+	{4, 2427, LBS_TX_PWR_JP_DEFAULT},
+	{5, 2432, LBS_TX_PWR_JP_DEFAULT},
+	{6, 2437, LBS_TX_PWR_JP_DEFAULT},
+	{7, 2442, LBS_TX_PWR_JP_DEFAULT},
+	{8, 2447, LBS_TX_PWR_JP_DEFAULT},
+	{9, 2452, LBS_TX_PWR_JP_DEFAULT},
+	{10, 2457, LBS_TX_PWR_JP_DEFAULT},
+	{11, 2462, LBS_TX_PWR_JP_DEFAULT},
+	{12, 2467, LBS_TX_PWR_JP_DEFAULT},
+	{13, 2472, LBS_TX_PWR_JP_DEFAULT},
+	{14, 2484, LBS_TX_PWR_JP_DEFAULT}
+};
+
+/**
+ * the structure for channel, frequency and power
+ */
+struct region_cfp_table {
+	u8 region;
+	struct chan_freq_power *cfp_BG;
+	int cfp_no_BG;
+};
+
+/**
+ * the structure for the mapping between region and CFP
+ */
+static struct region_cfp_table region_cfp_table[] = {
+	{0x10,			/*US FCC */
+	 channel_freq_power_US_BG,
+	 ARRAY_SIZE(channel_freq_power_US_BG),
+	 }
+	,
+	{0x20,			/*CANADA IC */
+	 channel_freq_power_US_BG,
+	 ARRAY_SIZE(channel_freq_power_US_BG),
+	 }
+	,
+	{0x30, /*EU*/ channel_freq_power_EU_BG,
+	 ARRAY_SIZE(channel_freq_power_EU_BG),
+	 }
+	,
+	{0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
+	 ARRAY_SIZE(channel_freq_power_SPN_BG),
+	 }
+	,
+	{0x32, /*FRANCE*/ channel_freq_power_FR_BG,
+	 ARRAY_SIZE(channel_freq_power_FR_BG),
+	 }
+	,
+	{0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
+	 ARRAY_SIZE(channel_freq_power_JPN_BG),
+	 }
+	,
+/*Add new region here */
+};
+
+/**
+ *  @brief This function finds the CFP table in
+ *  region_cfp_table based on the region code.
+ *
+ *  @param region  The region code
+ *  @param cfp_no  A pointer to the number of CFP entries
+ *  @return        A pointer to the CFP table
+ */
+static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
+{
+	int i, end;
+
+	lbs_deb_enter(LBS_DEB_MAIN);
+
+	end = ARRAY_SIZE(region_cfp_table);
+
+	for (i = 0; i < end ; i++) {
+		lbs_deb_main("region_cfp_table[i].region=%d\n",
+			region_cfp_table[i].region);
+		if (region_cfp_table[i].region == region) {
+			*cfp_no = region_cfp_table[i].cfp_no_BG;
+			lbs_deb_leave(LBS_DEB_MAIN);
+			return region_cfp_table[i].cfp_BG;
+		}
+	}
+
+	lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
+	return NULL;
+}
+
+int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
+{
+	int ret = 0;
+	int i = 0;
+
+	struct chan_freq_power *cfp;
+	int cfp_no;
+
+	lbs_deb_enter(LBS_DEB_MAIN);
+
+	memset(priv->region_channel, 0, sizeof(priv->region_channel));
+
+	cfp = lbs_get_region_cfp_table(region, &cfp_no);
+	if (cfp != NULL) {
+		priv->region_channel[i].nrcfp = cfp_no;
+		priv->region_channel[i].CFP = cfp;
+	} else {
+		lbs_deb_main("wrong region code %#x in band B/G\n",
+		       region);
+		ret = -1;
+		goto out;
+	}
+	priv->region_channel[i].valid = 1;
+	priv->region_channel[i].region = region;
+	priv->region_channel[i].band = band;
+	i++;
+out:
+	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
+	return ret;
+}
+
+
+
 
 /*********************************************************************/
 /*                                                                   */
@@ -161,31 +345,15 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
 	scantype = CMD_SCAN_TYPE_ACTIVE;
 
 	for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
-		if (priv->enable11d && (priv->connect_status != LBS_CONNECTED)
-		    && (priv->mesh_connect_status != LBS_CONNECTED)) {
-			/* Scan all the supported chan for the first scan */
-			if (!priv->universal_channel[rgnidx].valid)
-				continue;
-			scanregion = &priv->universal_channel[rgnidx];
-
-			/* clear the parsed_region_chan for the first scan */
-			memset(&priv->parsed_region_chan, 0x00,
-			       sizeof(priv->parsed_region_chan));
-		} else {
-			if (!priv->region_channel[rgnidx].valid)
-				continue;
-			scanregion = &priv->region_channel[rgnidx];
-		}
+		if (!priv->region_channel[rgnidx].valid)
+			continue;
+		scanregion = &priv->region_channel[rgnidx];
 
 		for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
 			struct chanscanparamset *chan = &scanchanlist[chanidx];
 
 			cfp = scanregion->CFP + nextchan;
 
-			if (priv->enable11d)
-				scantype = lbs_get_scan_type_11d(cfp->channel,
-								 &priv->parsed_region_chan);
-
 			if (scanregion->band == BAND_B || scanregion->band == BAND_G)
 				chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
 
@@ -519,7 +687,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
 	struct ieee_ie_cf_param_set *cf;
 	struct ieee_ie_ibss_param_set *ibss;
 	DECLARE_SSID_BUF(ssid);
-	struct ieee_ie_country_info_set *pcountryinfo;
 	uint8_t *pos, *end, *p;
 	uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
 	uint16_t beaconsize = 0;
@@ -642,26 +809,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
 			lbs_deb_scan("got IBSS IE\n");
 			break;
 
-		case WLAN_EID_COUNTRY:
-			pcountryinfo = (struct ieee_ie_country_info_set *) pos;
-			lbs_deb_scan("got COUNTRY IE\n");
-			if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
-			    || pcountryinfo->header.len > 254) {
-				lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
-					     __func__,
-					     pcountryinfo->header.len,
-					     sizeof(pcountryinfo->countrycode));
-				ret = -1;
-				goto done;
-			}
-
-			memcpy(&bss->countryinfo, pcountryinfo,
-				pcountryinfo->header.len + 2);
-			lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
-				    (uint8_t *) pcountryinfo,
-				    (int) (pcountryinfo->header.len + 2));
-			break;
-
 		case WLAN_EID_EXT_SUPP_RATES:
 			/* only process extended supported rate if data rate is
 			 * already found. Data rate IE should come before
@@ -812,7 +959,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
 	/* SSID */
 	iwe.cmd = SIOCGIWESSID;
 	iwe.u.data.flags = 1;
-	iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IW_ESSID_MAX_SIZE);
+	iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
 	start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
 
 	/* Mode */
@@ -1022,9 +1169,12 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
 		return -EAGAIN;
 
 	/* Update RSSI if current BSS is a locally created ad-hoc BSS */
-	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate)
-		lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
-					     CMD_OPTION_WAITFORRSP, 0, NULL);
+	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
+		err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
+				CMD_OPTION_WAITFORRSP, 0, NULL);
+		if (err)
+			goto out;
+	}
 
 	mutex_lock(&priv->lock);
 	list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
@@ -1058,7 +1208,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
 
 	dwrq->length = (ev - extra);
 	dwrq->flags = 0;
-
+out:
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
 	return err;
 }
@@ -1141,11 +1291,11 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
 	/* The size of the TLV buffer is equal to the entire command response
 	 *   size (scanrespsize) minus the fixed fields (sizeof()'s), the
 	 *   BSS Descriptions (bssdescriptsize as bytesLef) and the command
-	 *   response header (S_DS_GEN)
+	 *   response header (sizeof(struct cmd_header))
 	 */
 	tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
 				     + sizeof(scanresp->nr_sets)
-				     + S_DS_GEN);
+				     + sizeof(struct cmd_header));
 
 	/*
 	 *  Process each scan response returned (scanresp->nr_sets). Save
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index fab7d5d097fc..8fb1706d7526 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -9,8 +9,36 @@
 
 #include <net/iw_handler.h>
 
+struct lbs_private;
+
 #define MAX_NETWORK_COUNT 128
 
+/** Chan-freq-TxPower mapping table*/
+struct chan_freq_power {
+	/** channel Number		*/
+	u16 channel;
+	/** frequency of this channel	*/
+	u32 freq;
+	/** Max allowed Tx power level	*/
+	u16 maxtxpower;
+	/** TRUE: channel unsupported; FALSE: supported */
+	u8 unsupported;
+};
+
+/** region-band mapping table*/
+struct region_channel {
+	/** TRUE if this entry is valid		     */
+	u8 valid;
+	/** region code for US, Japan ...	     */
+	u8 region;
+	/** band B/G/A, used for BAND_CONFIG cmd	     */
+	u8 band;
+	/** Actual No. of elements in the array below */
+	u8 nrcfp;
+	/** chan-freq-txpower mapping table*/
+	struct chan_freq_power *CFP;
+};
+
 /**
  *  @brief Maximum number of channels that can be sent in a setuserscan ioctl
  */
@@ -18,6 +46,8 @@
 
 int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
 
+int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
+
 int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
 				u8 ssid_len);
 
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 8c3766a6e8e7..315d1ce286ca 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -5,7 +5,7 @@
 #include <linux/etherdevice.h>
 #include <linux/sched.h>
 
-#include "hostcmd.h"
+#include "host.h"
 #include "radiotap.h"
 #include "decl.h"
 #include "defs.h"
@@ -131,12 +131,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txpd->tx_packet_length = cpu_to_le16(pkt_len);
 	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
 
-	if (dev == priv->mesh_dev) {
-		if (priv->mesh_fw_ver == MESH_FW_OLD)
-			txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
-		else if (priv->mesh_fw_ver == MESH_FW_NEW)
-			txpd->u.bss.bss_num = MESH_IFACE_ID;
-	}
+	lbs_mesh_set_txpd(priv, dev, txpd);
 
 	lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd));
 
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 99905df65b25..3e72c86ceca8 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -5,8 +5,8 @@
 #define _LBS_TYPES_H_
 
 #include <linux/if_ether.h>
+#include <linux/ieee80211.h>
 #include <asm/byteorder.h>
-#include <linux/wireless.h>
 
 struct ieee_ie_header {
 	u8 id;
@@ -247,7 +247,7 @@ struct mrvl_meshie_val {
 	uint8_t active_metric_id;
 	uint8_t mesh_capability;
 	uint8_t mesh_id_len;
-	uint8_t mesh_id[IW_ESSID_MAX_SIZE];
+	uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
 } __attribute__ ((packed));
 
 struct mrvl_meshie {
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index be837a0d2517..a8eb9e1fcf36 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -45,6 +45,63 @@ static inline void lbs_cancel_association_work(struct lbs_private *priv)
 	priv->pending_assoc_req = NULL;
 }
 
+void lbs_send_disconnect_notification(struct lbs_private *priv)
+{
+	union iwreq_data wrqu;
+
+	memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
+{
+	union iwreq_data iwrq;
+	u8 buf[50];
+
+	lbs_deb_enter(LBS_DEB_WEXT);
+
+	memset(&iwrq, 0, sizeof(union iwreq_data));
+	memset(buf, 0, sizeof(buf));
+
+	snprintf(buf, sizeof(buf) - 1, "%s", str);
+
+	iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
+
+	/* Send Event to upper layer */
+	lbs_deb_wext("event indication string %s\n", (char *)buf);
+	lbs_deb_wext("event indication length %d\n", iwrq.data.length);
+	lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
+
+	wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
+
+	lbs_deb_leave(LBS_DEB_WEXT);
+}
+
+/**
+ *  @brief This function handles MIC failure event.
+ *
+ *  @param priv    A pointer to struct lbs_private structure
+ *  @param event   the event id
+ *  @return 	   n/a
+ */
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
+{
+	char buf[50];
+
+	lbs_deb_enter(LBS_DEB_CMD);
+	memset(buf, 0, sizeof(buf));
+
+	sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
+
+	if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
+		strcat(buf, "unicast ");
+	else
+		strcat(buf, "multicast ");
+
+	lbs_send_iwevcustom_event(priv, buf);
+	lbs_deb_leave(LBS_DEB_CMD);
+}
 
 /**
  *  @brief Find the channel frequency power info with specific channel
@@ -66,8 +123,6 @@ struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
 	for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
 		rc = &priv->region_channel[j];
 
-		if (priv->enable11d)
-			rc = &priv->universal_channel[j];
 		if (!rc->valid || !rc->CFP)
 			continue;
 		if (rc->band != band)
@@ -107,8 +162,6 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(
 	for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
 		rc = &priv->region_channel[j];
 
-		if (priv->enable11d)
-			rc = &priv->universal_channel[j];
 		if (!rc->valid || !rc->CFP)
 			continue;
 		if (rc->band != band)
@@ -169,12 +222,12 @@ static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
 	lbs_deb_enter(LBS_DEB_WEXT);
 
 	cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
-					   priv->curbssparams.channel);
+					   priv->channel);
 
 	if (!cfp) {
-		if (priv->curbssparams.channel)
+		if (priv->channel)
 			lbs_deb_wext("invalid channel %d\n",
-			       priv->curbssparams.channel);
+			       priv->channel);
 		return -EINVAL;
 	}
 
@@ -547,8 +600,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
 	struct chan_freq_power *cfp;
 	u8 rates[MAX_RATES + 1];
 
-	u8 flag = 0;
-
 	lbs_deb_enter(LBS_DEB_WEXT);
 
 	dwrq->length = sizeof(struct iw_range);
@@ -570,52 +621,21 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
 
 	range->scan_capa = IW_SCAN_CAPA_ESSID;
 
-	if (priv->enable11d &&
-	    (priv->connect_status == LBS_CONNECTED ||
-	    priv->mesh_connect_status == LBS_CONNECTED)) {
-		u8 chan_no;
-		u8 band;
-
-		struct parsed_region_chan_11d *parsed_region_chan =
-		    &priv->parsed_region_chan;
-
-		if (parsed_region_chan == NULL) {
-			lbs_deb_wext("11d: parsed_region_chan is NULL\n");
-			goto out;
-		}
-		band = parsed_region_chan->band;
-		lbs_deb_wext("band %d, nr_char %d\n", band,
-		       parsed_region_chan->nr_chan);
-
+	for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
+	     && (j < ARRAY_SIZE(priv->region_channel)); j++) {
+		cfp = priv->region_channel[j].CFP;
 		for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-		     && (i < parsed_region_chan->nr_chan); i++) {
-			chan_no = parsed_region_chan->chanpwr[i].chan;
-			lbs_deb_wext("chan_no %d\n", chan_no);
-			range->freq[range->num_frequency].i = (long)chan_no;
+		     && priv->region_channel[j].valid
+		     && cfp
+		     && (i < priv->region_channel[j].nrcfp); i++) {
+			range->freq[range->num_frequency].i =
+			    (long)cfp->channel;
 			range->freq[range->num_frequency].m =
-			    (long)lbs_chan_2_freq(chan_no) * 100000;
+			    (long)cfp->freq * 100000;
 			range->freq[range->num_frequency].e = 1;
+			cfp++;
 			range->num_frequency++;
 		}
-		flag = 1;
-	}
-	if (!flag) {
-		for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-		     && (j < ARRAY_SIZE(priv->region_channel)); j++) {
-			cfp = priv->region_channel[j].CFP;
-			for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-			     && priv->region_channel[j].valid
-			     && cfp
-			     && (i < priv->region_channel[j].nrcfp); i++) {
-				range->freq[range->num_frequency].i =
-				    (long)cfp->channel;
-				range->freq[range->num_frequency].m =
-				    (long)cfp->freq * 100000;
-				range->freq[range->num_frequency].e = 1;
-				cfp++;
-				range->num_frequency++;
-			}
-		}
 	}
 
 	lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
@@ -700,7 +720,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
 		                  | IW_ENC_CAPA_CIPHER_CCMP;
 	}
 
-out:
 	lbs_deb_leave(LBS_DEB_WEXT);
 	return 0;
 }
@@ -709,6 +728,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
 			  struct iw_param *vwrq, char *extra)
 {
 	struct lbs_private *priv = dev->ml_priv;
+	int ret = 0;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
@@ -737,8 +757,54 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
 		       "setting power timeout is not supported\n");
 		return -EINVAL;
 	} else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
-		lbs_deb_wext("setting power period not supported\n");
-		return -EINVAL;
+		vwrq->value = vwrq->value / 1000;
+		if (!priv->enter_deep_sleep) {
+			lbs_pr_err("deep sleep feature is not implemented "
+					"for this interface driver\n");
+			return -EINVAL;
+		}
+
+		if (priv->connect_status == LBS_CONNECTED) {
+			if ((priv->is_auto_deep_sleep_enabled) &&
+						(vwrq->value == -1000)) {
+				lbs_exit_auto_deep_sleep(priv);
+				return 0;
+			} else {
+				lbs_pr_err("can't use deep sleep cmd in "
+						"connected state\n");
+				return -EINVAL;
+			}
+		}
+
+		if ((vwrq->value < 0) && (vwrq->value != -1000)) {
+			lbs_pr_err("unknown option\n");
+			return -EINVAL;
+		}
+
+		if (vwrq->value > 0) {
+			if (!priv->is_auto_deep_sleep_enabled) {
+				priv->is_activity_detected = 0;
+				priv->auto_deep_sleep_timeout = vwrq->value;
+				lbs_enter_auto_deep_sleep(priv);
+			} else {
+				priv->auto_deep_sleep_timeout = vwrq->value;
+				lbs_deb_debugfs("auto deep sleep: "
+						"already enabled\n");
+			}
+			return 0;
+		} else {
+			if (priv->is_auto_deep_sleep_enabled) {
+				lbs_exit_auto_deep_sleep(priv);
+				/* Try to exit deep sleep if auto
+				 * deep sleep is disabled */
+				ret = lbs_set_deep_sleep(priv, 0);
+			}
+			if (vwrq->value == 0)
+				ret = lbs_set_deep_sleep(priv, 1);
+			else if (vwrq->value == -1000)
+				ret = lbs_set_deep_sleep(priv, 0);
+			return ret;
+		}
 	}
 
 	if (priv->psmode != LBS802_11POWERMODECAM) {
@@ -752,6 +818,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
 	}
 
 	lbs_deb_leave(LBS_DEB_WEXT);
+
 	return 0;
 }
 
@@ -785,7 +852,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
 	u32 rssi_qual;
 	u32 tx_qual;
 	u32 quality = 0;
-	int stats_valid = 0;
+	int ret, stats_valid = 0;
 	u8 rssi;
 	u32 tx_retries;
 	struct cmd_ds_802_11_get_log log;
@@ -834,7 +901,9 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
 
 	memset(&log, 0, sizeof(log));
 	log.hdr.size = cpu_to_le16(sizeof(log));
-	lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
+	ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
+	if (ret)
+		goto out;
 
 	tx_retries = le32_to_cpu(log.retry);
 
@@ -862,8 +931,10 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
 	stats_valid = 1;
 
 	/* update stats asynchronously for future calls */
-	lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
+	ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
 					0, 0, NULL);
+	if (ret)
+		lbs_pr_err("RSSI command failed\n");
 out:
 	if (!stats_valid) {
 		priv->wstats.miss.beacon = 0;
@@ -973,7 +1044,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
 		goto out;
 	}
 
-	if (fwrq->m != priv->curbssparams.channel) {
+	if (fwrq->m != priv->channel) {
 		lbs_deb_wext("mesh channel change forces eth disconnect\n");
 		if (priv->mode == IW_MODE_INFRA)
 			lbs_cmd_80211_deauthenticate(priv,
@@ -1000,6 +1071,7 @@ static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
 	u8 rates[MAX_RATES + 1];
 
 	lbs_deb_enter(LBS_DEB_WEXT);
+
 	lbs_deb_wext("vwrq->value %d\n", vwrq->value);
 	lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
 
@@ -1975,7 +2047,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
 {
 	struct lbs_private *priv = dev->ml_priv;
 	int ret = 0;
-	u8 ssid[IW_ESSID_MAX_SIZE];
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
 	u8 ssid_len = 0;
 	struct assoc_request * assoc_req;
 	int in_ssid_len = dwrq->length;
@@ -1989,7 +2061,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
 	}
 
 	/* Check the size of the string */
-	if (in_ssid_len > IW_ESSID_MAX_SIZE) {
+	if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
 		ret = -E2BIG;
 		goto out;
 	}
@@ -2020,7 +2092,7 @@ out:
 			ret = -ENOMEM;
 		} else {
 			/* Copy the SSID to the association request */
-			memcpy(&assoc_req->ssid, &ssid, IW_ESSID_MAX_SIZE);
+			memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
 			assoc_req->ssid_len = ssid_len;
 			set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
 			lbs_postpone_association_work(priv);
@@ -2071,7 +2143,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
 	}
 
 	/* Check the size of the string */
-	if (dwrq->length > IW_ESSID_MAX_SIZE) {
+	if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
 		ret = -E2BIG;
 		goto out;
 	}
@@ -2086,7 +2158,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
 	}
 
 	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-			priv->curbssparams.channel);
+			priv->channel);
  out:
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
 	return ret;
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
index 4c08db497606..f3f19fe8c6c6 100644
--- a/drivers/net/wireless/libertas/wext.h
+++ b/drivers/net/wireless/libertas/wext.h
@@ -4,7 +4,14 @@
 #ifndef	_LBS_WEXT_H_
 #define	_LBS_WEXT_H_
 
+void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
+
+struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
+	struct lbs_private *priv,
+	u8 band,
+	u16 channel);
+
 extern struct iw_handler_def lbs_handler_def;
-extern struct iw_handler_def mesh_handler_def;
 
 #endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 392337b37b1d..3691c307e674 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -23,6 +23,8 @@
 static char *lbtf_fw_name = "lbtf_usb.bin";
 module_param_named(fw_name, lbtf_fw_name, charp, 0644);
 
+MODULE_FIRMWARE("lbtf_usb.bin");
+
 static struct usb_device_id if_usb_table[] = {
 	/* Enter the device signature inside */
 	{ USB_DEVICE(0x1286, 0x2001) },
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 38cfd79e0590..88e41176e7fd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -284,7 +284,7 @@ struct mac80211_hwsim_data {
 	struct ieee80211_channel *channel;
 	unsigned long beacon_int; /* in jiffies unit */
 	unsigned int rx_filter;
-	int started;
+	bool started, idle;
 	struct timer_list beacon_timer;
 	enum ps_mode {
 		PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -365,6 +365,49 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
 }
 
 
+static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
+{
+	struct mac80211_hwsim_data *data = hw->priv;
+	struct sk_buff *skb;
+	struct hwsim_radiotap_hdr *hdr;
+	u16 flags;
+	struct ieee80211_hdr *hdr11;
+
+	if (!netif_running(hwsim_mon))
+		return;
+
+	skb = dev_alloc_skb(100);
+	if (skb == NULL)
+		return;
+
+	hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr));
+	hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+	hdr->hdr.it_pad = 0;
+	hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
+	hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
+					  (1 << IEEE80211_RADIOTAP_CHANNEL));
+	hdr->rt_flags = 0;
+	hdr->rt_rate = 0;
+	hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
+	flags = IEEE80211_CHAN_2GHZ;
+	hdr->rt_chbitmask = cpu_to_le16(flags);
+
+	hdr11 = (struct ieee80211_hdr *) skb_put(skb, 10);
+	hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+					   IEEE80211_STYPE_ACK);
+	hdr11->duration_id = cpu_to_le16(0);
+	memcpy(hdr11->addr1, addr, ETH_ALEN);
+
+	skb->dev = hwsim_mon;
+	skb_set_mac_header(skb, 0);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = htons(ETH_P_802_2);
+	memset(skb->cb, 0, sizeof(skb->cb));
+	netif_rx(skb);
+}
+
+
 static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
 			   struct sk_buff *skb)
 {
@@ -402,6 +445,12 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_rx_status rx_status;
 
+	if (data->idle) {
+		printk(KERN_DEBUG "%s: Trying to TX when idle - reject\n",
+		       wiphy_name(hw->wiphy));
+		return false;
+	}
+
 	memset(&rx_status, 0, sizeof(rx_status));
 	/* TODO: set mactime */
 	rx_status.freq = data->channel->center_freq;
@@ -428,7 +477,8 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 		if (data == data2)
 			continue;
 
-		if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
+		if (data2->idle || !data2->started ||
+		    !hwsim_ps_rx_ok(data2, skb) ||
 		    !data->channel || !data2->channel ||
 		    data->channel->center_freq != data2->channel->center_freq ||
 		    !(data->group & data2->group))
@@ -464,6 +514,10 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	}
 
 	ack = mac80211_hwsim_tx_frame(hw, skb);
+	if (ack && skb->len >= 16) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		mac80211_hwsim_monitor_ack(hw, hdr->addr2);
+	}
 
 	txi = IEEE80211_SKB_CB(skb);
 
@@ -571,6 +625,8 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
 	       !!(conf->flags & IEEE80211_CONF_IDLE),
 	       !!(conf->flags & IEEE80211_CONF_PS));
 
+	data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+
 	data->channel = conf->channel;
 	if (!data->started || !data->beacon_int)
 		del_timer(&data->beacon_timer);
@@ -1045,19 +1101,20 @@ static int __init init_mac80211_hwsim(void)
 				sband->channels = data->channels_2ghz;
 				sband->n_channels =
 					ARRAY_SIZE(hwsim_channels_2ghz);
+				sband->bitrates = data->rates;
+				sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
 				break;
 			case IEEE80211_BAND_5GHZ:
 				sband->channels = data->channels_5ghz;
 				sband->n_channels =
 					ARRAY_SIZE(hwsim_channels_5ghz);
+				sband->bitrates = data->rates + 4;
+				sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
 				break;
 			default:
 				break;
 			}
 
-			sband->bitrates = data->rates;
-			sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
-
 			sband->ht_cap.ht_supported = true;
 			sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
 				IEEE80211_HT_CAP_GRN_FLD |
@@ -1089,46 +1146,46 @@ static int __init init_mac80211_hwsim(void)
 			break;
 		case HWSIM_REGTEST_WORLD_ROAM:
 			if (i == 0) {
-				hw->wiphy->custom_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 				wiphy_apply_custom_regulatory(hw->wiphy,
 					&hwsim_world_regdom_custom_01);
 			}
 			break;
 		case HWSIM_REGTEST_CUSTOM_WORLD:
-			hw->wiphy->custom_regulatory = true;
+			hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 			wiphy_apply_custom_regulatory(hw->wiphy,
 				&hwsim_world_regdom_custom_01);
 			break;
 		case HWSIM_REGTEST_CUSTOM_WORLD_2:
 			if (i == 0) {
-				hw->wiphy->custom_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 				wiphy_apply_custom_regulatory(hw->wiphy,
 					&hwsim_world_regdom_custom_01);
 			} else if (i == 1) {
-				hw->wiphy->custom_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 				wiphy_apply_custom_regulatory(hw->wiphy,
 					&hwsim_world_regdom_custom_02);
 			}
 			break;
 		case HWSIM_REGTEST_STRICT_ALL:
-			hw->wiphy->strict_regulatory = true;
+			hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 			break;
 		case HWSIM_REGTEST_STRICT_FOLLOW:
 		case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
 			if (i == 0)
-				hw->wiphy->strict_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 			break;
 		case HWSIM_REGTEST_ALL:
 			if (i == 0) {
-				hw->wiphy->custom_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 				wiphy_apply_custom_regulatory(hw->wiphy,
 					&hwsim_world_regdom_custom_01);
 			} else if (i == 1) {
-				hw->wiphy->custom_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 				wiphy_apply_custom_regulatory(hw->wiphy,
 					&hwsim_world_regdom_custom_02);
 			} else if (i == 4)
-				hw->wiphy->strict_regulatory = true;
+				hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
 			break;
 		default:
 			break;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 746532ebe5a8..0cb5ecc822a8 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/pci.h>
@@ -27,18 +28,6 @@
 #define MWL8K_NAME	KBUILD_MODNAME
 #define MWL8K_VERSION	"0.10"
 
-MODULE_DESCRIPTION(MWL8K_DESC);
-MODULE_VERSION(MWL8K_VERSION);
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
-MODULE_LICENSE("GPL");
-
-static DEFINE_PCI_DEVICE_TABLE(mwl8k_table) = {
-	{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = 8687, },
-	{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = 8687, },
-	{ }
-};
-MODULE_DEVICE_TABLE(pci, mwl8k_table);
-
 /* Register definitions */
 #define MWL8K_HIU_GEN_PTR			0x00000c10
 #define  MWL8K_MODE_STA				 0x0000005a
@@ -88,72 +77,89 @@ MODULE_DEVICE_TABLE(pci, mwl8k_table);
 				 MWL8K_A2H_INT_RX_READY | \
 				 MWL8K_A2H_INT_TX_DONE)
 
-/* WME stream classes */
-#define WME_AC_BE	0		/* best effort */
-#define WME_AC_BK	1		/* background */
-#define WME_AC_VI	2		/* video */
-#define WME_AC_VO	3		/* voice */
-
 #define MWL8K_RX_QUEUES		1
 #define MWL8K_TX_QUEUES		4
 
+struct rxd_ops {
+	int rxd_size;
+	void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
+	void (*rxd_refill)(void *rxd, dma_addr_t addr, int len);
+	int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status);
+};
+
+struct mwl8k_device_info {
+	char *part_name;
+	char *helper_image;
+	char *fw_image;
+	struct rxd_ops *rxd_ops;
+	u16 modes;
+};
+
 struct mwl8k_rx_queue {
-	int rx_desc_count;
+	int rxd_count;
 
 	/* hw receives here */
-	int rx_head;
+	int head;
 
 	/* refill descs here */
-	int rx_tail;
+	int tail;
 
-	struct mwl8k_rx_desc *rx_desc_area;
-	dma_addr_t rx_desc_dma;
-	struct sk_buff **rx_skb;
+	void *rxd;
+	dma_addr_t rxd_dma;
+	struct {
+		struct sk_buff *skb;
+		DECLARE_PCI_UNMAP_ADDR(dma)
+	} *buf;
 };
 
 struct mwl8k_tx_queue {
 	/* hw transmits here */
-	int tx_head;
+	int head;
 
 	/* sw appends here */
-	int tx_tail;
+	int tail;
 
-	struct ieee80211_tx_queue_stats tx_stats;
-	struct mwl8k_tx_desc *tx_desc_area;
-	dma_addr_t tx_desc_dma;
-	struct sk_buff **tx_skb;
+	struct ieee80211_tx_queue_stats stats;
+	struct mwl8k_tx_desc *txd;
+	dma_addr_t txd_dma;
+	struct sk_buff **skb;
 };
 
 /* Pointers to the firmware data and meta information about it.  */
 struct mwl8k_firmware {
-	/* Microcode */
-	struct firmware *ucode;
-
 	/* Boot helper code */
 	struct firmware *helper;
+
+	/* Microcode */
+	struct firmware *ucode;
 };
 
 struct mwl8k_priv {
+	void __iomem *sram;
 	void __iomem *regs;
 	struct ieee80211_hw *hw;
 
 	struct pci_dev *pdev;
-	u8 name[16];
+
+	struct mwl8k_device_info *device_info;
+	bool ap_fw;
+	struct rxd_ops *rxd_ops;
 
 	/* firmware files and meta data */
 	struct mwl8k_firmware fw;
-	u32 part_num;
 
 	/* firmware access */
 	struct mutex fw_mutex;
 	struct task_struct *fw_mutex_owner;
 	int fw_mutex_depth;
-	struct completion *tx_wait;
 	struct completion *hostcmd_wait;
 
 	/* lock held over TX and TX reap */
 	spinlock_t tx_lock;
 
+	/* TX quiesce completion, protected by fw_mutex and tx_lock */
+	struct completion *tx_wait;
+
 	struct ieee80211_vif *vif;
 
 	struct ieee80211_channel *current_channel;
@@ -178,10 +184,11 @@ struct mwl8k_priv {
 	/* PHY parameters */
 	struct ieee80211_supported_band band;
 	struct ieee80211_channel channels[14];
-	struct ieee80211_rate rates[12];
+	struct ieee80211_rate rates[13];
 
 	bool radio_on;
 	bool radio_short_preamble;
+	bool sniffer_enabled;
 	bool wmm_enabled;
 
 	/* XXX need to convert this to handle multiple interfaces */
@@ -199,9 +206,6 @@ struct mwl8k_priv {
 
 	/* Tasklet to reclaim TX descriptors and buffers after tx */
 	struct tasklet_struct tx_reclaim_task;
-
-	/* Work thread to serialize configuration requests */
-	struct workqueue_struct *config_wq;
 };
 
 /* Per interface specific private data */
@@ -220,7 +224,7 @@ struct mwl8k_vif {
 	 * Subset of supported legacy rates.
 	 * Intersection of AP and STA supported rates.
 	 */
-	struct ieee80211_rate legacy_rates[12];
+	struct ieee80211_rate legacy_rates[13];
 
 	/* number of supported legacy rates */
 	u8	legacy_nrates;
@@ -252,9 +256,10 @@ static const struct ieee80211_rate mwl8k_rates[] = {
 	{ .bitrate = 10, .hw_value = 2, },
 	{ .bitrate = 20, .hw_value = 4, },
 	{ .bitrate = 55, .hw_value = 11, },
+	{ .bitrate = 110, .hw_value = 22, },
+	{ .bitrate = 220, .hw_value = 44, },
 	{ .bitrate = 60, .hw_value = 12, },
 	{ .bitrate = 90, .hw_value = 18, },
-	{ .bitrate = 110, .hw_value = 22, },
 	{ .bitrate = 120, .hw_value = 24, },
 	{ .bitrate = 180, .hw_value = 36, },
 	{ .bitrate = 240, .hw_value = 48, },
@@ -270,10 +275,12 @@ static const struct ieee80211_rate mwl8k_rates[] = {
 /* Firmware command codes */
 #define MWL8K_CMD_CODE_DNLD		0x0001
 #define MWL8K_CMD_GET_HW_SPEC		0x0003
+#define MWL8K_CMD_SET_HW_SPEC		0x0004
 #define MWL8K_CMD_MAC_MULTICAST_ADR	0x0010
 #define MWL8K_CMD_GET_STAT		0x0014
 #define MWL8K_CMD_RADIO_CONTROL		0x001c
 #define MWL8K_CMD_RF_TX_POWER		0x001e
+#define MWL8K_CMD_RF_ANTENNA		0x0020
 #define MWL8K_CMD_SET_PRE_SCAN		0x0107
 #define MWL8K_CMD_SET_POST_SCAN		0x0108
 #define MWL8K_CMD_SET_RF_CHANNEL	0x010a
@@ -287,6 +294,7 @@ static const struct ieee80211_rate mwl8k_rates[] = {
 #define MWL8K_CMD_MIMO_CONFIG		0x0125
 #define MWL8K_CMD_USE_FIXED_RATE	0x0126
 #define MWL8K_CMD_ENABLE_SNIFFER	0x0150
+#define MWL8K_CMD_SET_MAC_ADDR		0x0202
 #define MWL8K_CMD_SET_RATEADAPT_MODE	0x0203
 #define MWL8K_CMD_UPDATE_STADB		0x1123
 
@@ -299,10 +307,12 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
 	switch (cmd & ~0x8000) {
 		MWL8K_CMDNAME(CODE_DNLD);
 		MWL8K_CMDNAME(GET_HW_SPEC);
+		MWL8K_CMDNAME(SET_HW_SPEC);
 		MWL8K_CMDNAME(MAC_MULTICAST_ADR);
 		MWL8K_CMDNAME(GET_STAT);
 		MWL8K_CMDNAME(RADIO_CONTROL);
 		MWL8K_CMDNAME(RF_TX_POWER);
+		MWL8K_CMDNAME(RF_ANTENNA);
 		MWL8K_CMDNAME(SET_PRE_SCAN);
 		MWL8K_CMDNAME(SET_POST_SCAN);
 		MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -316,6 +326,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
 		MWL8K_CMDNAME(MIMO_CONFIG);
 		MWL8K_CMDNAME(USE_FIXED_RATE);
 		MWL8K_CMDNAME(ENABLE_SNIFFER);
+		MWL8K_CMDNAME(SET_MAC_ADDR);
 		MWL8K_CMDNAME(SET_RATEADAPT_MODE);
 		MWL8K_CMDNAME(UPDATE_STADB);
 	default:
@@ -353,41 +364,35 @@ static void mwl8k_release_firmware(struct mwl8k_priv *priv)
 
 /* Request fw image */
 static int mwl8k_request_fw(struct mwl8k_priv *priv,
-				const char *fname, struct firmware **fw)
+			    const char *fname, struct firmware **fw)
 {
 	/* release current image */
 	if (*fw != NULL)
 		mwl8k_release_fw(fw);
 
 	return request_firmware((const struct firmware **)fw,
-						fname, &priv->pdev->dev);
+				fname, &priv->pdev->dev);
 }
 
-static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num)
+static int mwl8k_request_firmware(struct mwl8k_priv *priv)
 {
-	u8 filename[64];
+	struct mwl8k_device_info *di = priv->device_info;
 	int rc;
 
-	priv->part_num = part_num;
-
-	snprintf(filename, sizeof(filename),
-		 "mwl8k/helper_%u.fw", priv->part_num);
-
-	rc = mwl8k_request_fw(priv, filename, &priv->fw.helper);
-	if (rc) {
-		printk(KERN_ERR
-			"%s Error requesting helper firmware file %s\n",
-			pci_name(priv->pdev), filename);
-		return rc;
+	if (di->helper_image != NULL) {
+		rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
+		if (rc) {
+			printk(KERN_ERR "%s: Error requesting helper "
+			       "firmware file %s\n", pci_name(priv->pdev),
+			       di->helper_image);
+			return rc;
+		}
 	}
 
-	snprintf(filename, sizeof(filename),
-		 "mwl8k/fmimage_%u.fw", priv->part_num);
-
-	rc = mwl8k_request_fw(priv, filename, &priv->fw.ucode);
+	rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
 	if (rc) {
-		printk(KERN_ERR "%s Error requesting firmware file %s\n",
-					pci_name(priv->pdev), filename);
+		printk(KERN_ERR "%s: Error requesting firmware file %s\n",
+		       pci_name(priv->pdev), di->fw_image);
 		mwl8k_release_fw(&priv->fw.helper);
 		return rc;
 	}
@@ -395,6 +400,9 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num)
 	return 0;
 }
 
+MODULE_FIRMWARE("mwl8k/helper_8687.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
+
 struct mwl8k_cmd_pkt {
 	__le16	code;
 	__le16	length;
@@ -434,6 +442,7 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
 			break;
 		}
 
+		cond_resched();
 		udelay(1);
 	} while (--loops);
 
@@ -542,43 +551,62 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
 	return rc;
 }
 
-static int mwl8k_load_firmware(struct mwl8k_priv *priv)
+static int mwl8k_load_firmware(struct ieee80211_hw *hw)
 {
-	int loops, rc;
+	struct mwl8k_priv *priv = hw->priv;
+	struct firmware *fw = priv->fw.ucode;
+	struct mwl8k_device_info *di = priv->device_info;
+	int rc;
+	int loops;
 
-	const u8 *ucode = priv->fw.ucode->data;
-	size_t ucode_len = priv->fw.ucode->size;
-	const u8 *helper = priv->fw.helper->data;
-	size_t helper_len = priv->fw.helper->size;
+	if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
+		struct firmware *helper = priv->fw.helper;
 
-	if (!memcmp(ucode, "\x01\x00\x00\x00", 4)) {
-		rc = mwl8k_load_fw_image(priv, helper, helper_len);
+		if (helper == NULL) {
+			printk(KERN_ERR "%s: helper image needed but none "
+			       "given\n", pci_name(priv->pdev));
+			return -EINVAL;
+		}
+
+		rc = mwl8k_load_fw_image(priv, helper->data, helper->size);
 		if (rc) {
 			printk(KERN_ERR "%s: unable to load firmware "
-				"helper image\n", pci_name(priv->pdev));
+			       "helper image\n", pci_name(priv->pdev));
 			return rc;
 		}
 		msleep(1);
 
-		rc = mwl8k_feed_fw_image(priv, ucode, ucode_len);
+		rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
 	} else {
-		rc = mwl8k_load_fw_image(priv, ucode, ucode_len);
+		rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
 	}
 
 	if (rc) {
-		printk(KERN_ERR "%s: unable to load firmware data\n",
-			pci_name(priv->pdev));
+		printk(KERN_ERR "%s: unable to load firmware image\n",
+		       pci_name(priv->pdev));
 		return rc;
 	}
 
-	iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
+	if (di->modes & BIT(NL80211_IFTYPE_AP))
+		iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
+	else
+		iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
 	msleep(1);
 
 	loops = 200000;
 	do {
-		if (ioread32(priv->regs + MWL8K_HIU_INT_CODE)
-						== MWL8K_FWSTA_READY)
+		u32 ready_code;
+
+		ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
+		if (ready_code == MWL8K_FWAP_READY) {
+			priv->ap_fw = 1;
+			break;
+		} else if (ready_code == MWL8K_FWSTA_READY) {
+			priv->ap_fw = 0;
 			break;
+		}
+
+		cond_resched();
 		udelay(1);
 	} while (--loops);
 
@@ -605,7 +633,7 @@ struct ewc_ht_info {
 /* Peer Entry flags - used to define the type of the peer node */
 #define MWL8K_PEER_TYPE_ACCESSPOINT	2
 
-#define MWL8K_IEEE_LEGACY_DATA_RATES	12
+#define MWL8K_IEEE_LEGACY_DATA_RATES	13
 #define MWL8K_MCS_BITMAP_SIZE		16
 
 struct peer_capability_info {
@@ -731,16 +759,96 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
 
 
 /*
- * Packet reception.
+ * Packet reception for 88w8366.
  */
-#define MWL8K_RX_CTRL_OWNED_BY_HOST	0x02
+struct mwl8k_rxd_8366 {
+	__le16 pkt_len;
+	__u8 sq2;
+	__u8 rate;
+	__le32 pkt_phys_addr;
+	__le32 next_rxd_phys_addr;
+	__le16 qos_control;
+	__le16 htsig2;
+	__le32 hw_rssi_info;
+	__le32 hw_noise_floor_info;
+	__u8 noise_floor;
+	__u8 pad0[3];
+	__u8 rssi;
+	__u8 rx_status;
+	__u8 channel;
+	__u8 rx_ctrl;
+} __attribute__((packed));
+
+#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST	0x80
+
+static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
+{
+	struct mwl8k_rxd_8366 *rxd = _rxd;
+
+	rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
+	rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
+}
+
+static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
+{
+	struct mwl8k_rxd_8366 *rxd = _rxd;
+
+	rxd->pkt_len = cpu_to_le16(len);
+	rxd->pkt_phys_addr = cpu_to_le32(addr);
+	wmb();
+	rxd->rx_ctrl = 0;
+}
+
+static int
+mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status)
+{
+	struct mwl8k_rxd_8366 *rxd = _rxd;
+
+	if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
+		return -1;
+	rmb();
+
+	memset(status, 0, sizeof(*status));
+
+	status->signal = -rxd->rssi;
+	status->noise = -rxd->noise_floor;
 
-struct mwl8k_rx_desc {
+	if (rxd->rate & 0x80) {
+		status->flag |= RX_FLAG_HT;
+		status->rate_idx = rxd->rate & 0x7f;
+	} else {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
+			if (mwl8k_rates[i].hw_value == rxd->rate) {
+				status->rate_idx = i;
+				break;
+			}
+		}
+	}
+
+	status->band = IEEE80211_BAND_2GHZ;
+	status->freq = ieee80211_channel_to_frequency(rxd->channel);
+
+	return le16_to_cpu(rxd->pkt_len);
+}
+
+static struct rxd_ops rxd_8366_ops = {
+	.rxd_size	= sizeof(struct mwl8k_rxd_8366),
+	.rxd_init	= mwl8k_rxd_8366_init,
+	.rxd_refill	= mwl8k_rxd_8366_refill,
+	.rxd_process	= mwl8k_rxd_8366_process,
+};
+
+/*
+ * Packet reception for 88w8687.
+ */
+struct mwl8k_rxd_8687 {
 	__le16 pkt_len;
 	__u8 link_quality;
 	__u8 noise_level;
 	__le32 pkt_phys_addr;
-	__le32 next_rx_desc_phys_addr;
+	__le32 next_rxd_phys_addr;
 	__le16 qos_control;
 	__le16 rate_info;
 	__le32 pad0[4];
@@ -752,6 +860,76 @@ struct mwl8k_rx_desc {
 	__u8 pad2[2];
 } __attribute__((packed));
 
+#define MWL8K_8687_RATE_INFO_SHORTPRE		0x8000
+#define MWL8K_8687_RATE_INFO_ANTSELECT(x)	(((x) >> 11) & 0x3)
+#define MWL8K_8687_RATE_INFO_RATEID(x)		(((x) >> 3) & 0x3f)
+#define MWL8K_8687_RATE_INFO_40MHZ		0x0004
+#define MWL8K_8687_RATE_INFO_SHORTGI		0x0002
+#define MWL8K_8687_RATE_INFO_MCS_FORMAT		0x0001
+
+#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST	0x02
+
+static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
+{
+	struct mwl8k_rxd_8687 *rxd = _rxd;
+
+	rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
+	rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
+}
+
+static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
+{
+	struct mwl8k_rxd_8687 *rxd = _rxd;
+
+	rxd->pkt_len = cpu_to_le16(len);
+	rxd->pkt_phys_addr = cpu_to_le32(addr);
+	wmb();
+	rxd->rx_ctrl = 0;
+}
+
+static int
+mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status)
+{
+	struct mwl8k_rxd_8687 *rxd = _rxd;
+	u16 rate_info;
+
+	if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
+		return -1;
+	rmb();
+
+	rate_info = le16_to_cpu(rxd->rate_info);
+
+	memset(status, 0, sizeof(*status));
+
+	status->signal = -rxd->rssi;
+	status->noise = -rxd->noise_level;
+	status->qual = rxd->link_quality;
+	status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
+	status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
+
+	if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
+		status->flag |= RX_FLAG_SHORTPRE;
+	if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
+		status->flag |= RX_FLAG_40MHZ;
+	if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
+		status->flag |= RX_FLAG_SHORT_GI;
+	if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
+		status->flag |= RX_FLAG_HT;
+
+	status->band = IEEE80211_BAND_2GHZ;
+	status->freq = ieee80211_channel_to_frequency(rxd->channel);
+
+	return le16_to_cpu(rxd->pkt_len);
+}
+
+static struct rxd_ops rxd_8687_ops = {
+	.rxd_size	= sizeof(struct mwl8k_rxd_8687),
+	.rxd_init	= mwl8k_rxd_8687_init,
+	.rxd_refill	= mwl8k_rxd_8687_refill,
+	.rxd_process	= mwl8k_rxd_8687_process,
+};
+
+
 #define MWL8K_RX_DESCS		256
 #define MWL8K_RX_MAXSZ		3800
 
@@ -762,43 +940,44 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
 	int size;
 	int i;
 
-	rxq->rx_desc_count = 0;
-	rxq->rx_head = 0;
-	rxq->rx_tail = 0;
+	rxq->rxd_count = 0;
+	rxq->head = 0;
+	rxq->tail = 0;
 
-	size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc);
+	size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size;
 
-	rxq->rx_desc_area =
-		pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma);
-	if (rxq->rx_desc_area == NULL) {
+	rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
+	if (rxq->rxd == NULL) {
 		printk(KERN_ERR "%s: failed to alloc RX descriptors\n",
-		       priv->name);
+		       wiphy_name(hw->wiphy));
 		return -ENOMEM;
 	}
-	memset(rxq->rx_desc_area, 0, size);
+	memset(rxq->rxd, 0, size);
 
-	rxq->rx_skb = kmalloc(MWL8K_RX_DESCS *
-				sizeof(*rxq->rx_skb), GFP_KERNEL);
-	if (rxq->rx_skb == NULL) {
+	rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
+	if (rxq->buf == NULL) {
 		printk(KERN_ERR "%s: failed to alloc RX skbuff list\n",
-			priv->name);
-		pci_free_consistent(priv->pdev, size,
-				    rxq->rx_desc_area, rxq->rx_desc_dma);
+		       wiphy_name(hw->wiphy));
+		pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
 		return -ENOMEM;
 	}
-	memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb));
+	memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
 
 	for (i = 0; i < MWL8K_RX_DESCS; i++) {
-		struct mwl8k_rx_desc *rx_desc;
+		int desc_size;
+		void *rxd;
 		int nexti;
+		dma_addr_t next_dma_addr;
 
-		rx_desc = rxq->rx_desc_area + i;
-		nexti = (i + 1) % MWL8K_RX_DESCS;
+		desc_size = priv->rxd_ops->rxd_size;
+		rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size);
 
-		rx_desc->next_rx_desc_phys_addr =
-			cpu_to_le32(rxq->rx_desc_dma
-						+ nexti * sizeof(*rx_desc));
-		rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
+		nexti = i + 1;
+		if (nexti == MWL8K_RX_DESCS)
+			nexti = 0;
+		next_dma_addr = rxq->rxd_dma + (nexti * desc_size);
+
+		priv->rxd_ops->rxd_init(rxd, next_dma_addr);
 	}
 
 	return 0;
@@ -811,27 +990,28 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
 	int refilled;
 
 	refilled = 0;
-	while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) {
+	while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
 		struct sk_buff *skb;
+		dma_addr_t addr;
 		int rx;
+		void *rxd;
 
 		skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
 		if (skb == NULL)
 			break;
 
-		rxq->rx_desc_count++;
-
-		rx = rxq->rx_tail;
-		rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS;
+		addr = pci_map_single(priv->pdev, skb->data,
+				      MWL8K_RX_MAXSZ, DMA_FROM_DEVICE);
 
-		rxq->rx_desc_area[rx].pkt_phys_addr =
-			cpu_to_le32(pci_map_single(priv->pdev, skb->data,
-					MWL8K_RX_MAXSZ, DMA_FROM_DEVICE));
+		rxq->rxd_count++;
+		rx = rxq->tail++;
+		if (rxq->tail == MWL8K_RX_DESCS)
+			rxq->tail = 0;
+		rxq->buf[rx].skb = skb;
+		pci_unmap_addr_set(&rxq->buf[rx], dma, addr);
 
-		rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ);
-		rxq->rx_skb[rx] = skb;
-		wmb();
-		rxq->rx_desc_area[rx].rx_ctrl = 0;
+		rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
+		priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
 
 		refilled++;
 	}
@@ -847,24 +1027,24 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
 	int i;
 
 	for (i = 0; i < MWL8K_RX_DESCS; i++) {
-		if (rxq->rx_skb[i] != NULL) {
-			unsigned long addr;
-
-			addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr);
-			pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ,
-					 PCI_DMA_FROMDEVICE);
-			kfree_skb(rxq->rx_skb[i]);
-			rxq->rx_skb[i] = NULL;
+		if (rxq->buf[i].skb != NULL) {
+			pci_unmap_single(priv->pdev,
+					 pci_unmap_addr(&rxq->buf[i], dma),
+					 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
+			pci_unmap_addr_set(&rxq->buf[i], dma, 0);
+
+			kfree_skb(rxq->buf[i].skb);
+			rxq->buf[i].skb = NULL;
 		}
 	}
 
-	kfree(rxq->rx_skb);
-	rxq->rx_skb = NULL;
+	kfree(rxq->buf);
+	rxq->buf = NULL;
 
 	pci_free_consistent(priv->pdev,
-			    MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc),
-			    rxq->rx_desc_area, rxq->rx_desc_dma);
-	rxq->rx_desc_area = NULL;
+			    MWL8K_RX_DESCS * priv->rxd_ops->rxd_size,
+			    rxq->rxd, rxq->rxd_dma);
+	rxq->rxd = NULL;
 }
 
 
@@ -880,9 +1060,11 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
 		!compare_ether_addr(wh->addr3, priv->capture_bssid);
 }
 
-static inline void mwl8k_save_beacon(struct mwl8k_priv *priv,
-							struct sk_buff *skb)
+static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
+				     struct sk_buff *skb)
 {
+	struct mwl8k_priv *priv = hw->priv;
+
 	priv->capture_beacon = false;
 	memset(priv->capture_bssid, 0, ETH_ALEN);
 
@@ -893,8 +1075,7 @@ static inline void mwl8k_save_beacon(struct mwl8k_priv *priv,
 	 */
 	priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
 	if (priv->beacon_skb != NULL)
-		queue_work(priv->config_wq,
-				&priv->finalize_join_worker);
+		ieee80211_queue_work(hw, &priv->finalize_join_worker);
 }
 
 static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
@@ -904,53 +1085,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 	int processed;
 
 	processed = 0;
-	while (rxq->rx_desc_count && limit--) {
-		struct mwl8k_rx_desc *rx_desc;
+	while (rxq->rxd_count && limit--) {
 		struct sk_buff *skb;
+		void *rxd;
+		int pkt_len;
 		struct ieee80211_rx_status status;
-		unsigned long addr;
-		struct ieee80211_hdr *wh;
 
-		rx_desc = rxq->rx_desc_area + rxq->rx_head;
-		if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST))
+		skb = rxq->buf[rxq->head].skb;
+		if (skb == NULL)
 			break;
-		rmb();
 
-		skb = rxq->rx_skb[rxq->rx_head];
-		if (skb == NULL)
+		rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
+
+		pkt_len = priv->rxd_ops->rxd_process(rxd, &status);
+		if (pkt_len < 0)
 			break;
-		rxq->rx_skb[rxq->rx_head] = NULL;
 
-		rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
-		rxq->rx_desc_count--;
+		rxq->buf[rxq->head].skb = NULL;
 
-		addr = le32_to_cpu(rx_desc->pkt_phys_addr);
-		pci_unmap_single(priv->pdev, addr,
-					MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
+		pci_unmap_single(priv->pdev,
+				 pci_unmap_addr(&rxq->buf[rxq->head], dma),
+				 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
 
-		skb_put(skb, le16_to_cpu(rx_desc->pkt_len));
-		mwl8k_remove_dma_header(skb);
+		rxq->head++;
+		if (rxq->head == MWL8K_RX_DESCS)
+			rxq->head = 0;
+
+		rxq->rxd_count--;
 
-		wh = (struct ieee80211_hdr *)skb->data;
+		skb_put(skb, pkt_len);
+		mwl8k_remove_dma_header(skb);
 
 		/*
-		 * Check for pending join operation. save a copy of
-		 * the beacon and schedule a tasklet to send finalize
-		 * join command to the firmware.
+		 * Check for a pending join operation.  Save a
+		 * copy of the beacon and schedule a tasklet to
+		 * send a FINALIZE_JOIN command to the firmware.
 		 */
-		if (mwl8k_capture_bssid(priv, wh))
-			mwl8k_save_beacon(priv, skb);
-
-		memset(&status, 0, sizeof(status));
-		status.mactime = 0;
-		status.signal = -rx_desc->rssi;
-		status.noise = -rx_desc->noise_level;
-		status.qual = rx_desc->link_quality;
-		status.antenna = 1;
-		status.rate_idx = 1;
-		status.flag = 0;
-		status.band = IEEE80211_BAND_2GHZ;
-		status.freq = ieee80211_channel_to_frequency(rx_desc->channel);
+		if (mwl8k_capture_bssid(priv, (void *)skb->data))
+			mwl8k_save_beacon(hw, skb);
+
 		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
 		ieee80211_rx_irqsafe(hw, skb);
 
@@ -965,24 +1139,10 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
  * Packet transmission.
  */
 
-/* Transmit queue assignment.  */
-enum {
-	MWL8K_WME_AC_BK	= 0,		/* background access */
-	MWL8K_WME_AC_BE	= 1,		/* best effort access */
-	MWL8K_WME_AC_VI	= 2,		/* video access */
-	MWL8K_WME_AC_VO	= 3,		/* voice access */
-};
-
 /* Transmit packet ACK policy */
 #define MWL8K_TXD_ACK_POLICY_NORMAL		0
 #define MWL8K_TXD_ACK_POLICY_BLOCKACK		3
 
-#define GET_TXQ(_ac) (\
-		((_ac) == WME_AC_VO) ? MWL8K_WME_AC_VO : \
-		((_ac) == WME_AC_VI) ? MWL8K_WME_AC_VI : \
-		((_ac) == WME_AC_BK) ? MWL8K_WME_AC_BK : \
-		MWL8K_WME_AC_BE)
-
 #define MWL8K_TXD_STATUS_OK			0x00000001
 #define MWL8K_TXD_STATUS_OK_RETRY		0x00000002
 #define MWL8K_TXD_STATUS_OK_MORE_RETRY		0x00000004
@@ -997,7 +1157,7 @@ struct mwl8k_tx_desc {
 	__le32 pkt_phys_addr;
 	__le16 pkt_len;
 	__u8 dest_MAC_addr[ETH_ALEN];
-	__le32 next_tx_desc_phys_addr;
+	__le32 next_txd_phys_addr;
 	__le32 reserved;
 	__le16 rate_info;
 	__u8 peer_id;
@@ -1013,44 +1173,40 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
 	int size;
 	int i;
 
-	memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats));
-	txq->tx_stats.limit = MWL8K_TX_DESCS;
-	txq->tx_head = 0;
-	txq->tx_tail = 0;
+	memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
+	txq->stats.limit = MWL8K_TX_DESCS;
+	txq->head = 0;
+	txq->tail = 0;
 
 	size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);
 
-	txq->tx_desc_area =
-		pci_alloc_consistent(priv->pdev, size, &txq->tx_desc_dma);
-	if (txq->tx_desc_area == NULL) {
+	txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
+	if (txq->txd == NULL) {
 		printk(KERN_ERR "%s: failed to alloc TX descriptors\n",
-		       priv->name);
+		       wiphy_name(hw->wiphy));
 		return -ENOMEM;
 	}
-	memset(txq->tx_desc_area, 0, size);
+	memset(txq->txd, 0, size);
 
-	txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb),
-								GFP_KERNEL);
-	if (txq->tx_skb == NULL) {
+	txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
+	if (txq->skb == NULL) {
 		printk(KERN_ERR "%s: failed to alloc TX skbuff list\n",
-		       priv->name);
-		pci_free_consistent(priv->pdev, size,
-				    txq->tx_desc_area, txq->tx_desc_dma);
+		       wiphy_name(hw->wiphy));
+		pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
 		return -ENOMEM;
 	}
-	memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb));
+	memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
 
 	for (i = 0; i < MWL8K_TX_DESCS; i++) {
 		struct mwl8k_tx_desc *tx_desc;
 		int nexti;
 
-		tx_desc = txq->tx_desc_area + i;
+		tx_desc = txq->txd + i;
 		nexti = (i + 1) % MWL8K_TX_DESCS;
 
 		tx_desc->status = 0;
-		tx_desc->next_tx_desc_phys_addr =
-			cpu_to_le32(txq->tx_desc_dma +
-						nexti * sizeof(*tx_desc));
+		tx_desc->next_txd_phys_addr =
+			cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc));
 	}
 
 	return 0;
@@ -1065,11 +1221,6 @@ static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
 	ioread32(priv->regs + MWL8K_HIU_INT_CODE);
 }
 
-static inline int mwl8k_txq_busy(struct mwl8k_priv *priv)
-{
-	return priv->pending_tx_pkts;
-}
-
 struct mwl8k_txq_info {
 	u32 fw_owned;
 	u32 drv_owned;
@@ -1089,14 +1240,13 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
 
 	memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info));
 
-	spin_lock_bh(&priv->tx_lock);
 	for (count = 0; count < MWL8K_TX_QUEUES; count++) {
 		txq = priv->txq + count;
-		txinfo[count].len = txq->tx_stats.len;
-		txinfo[count].head = txq->tx_head;
-		txinfo[count].tail = txq->tx_tail;
+		txinfo[count].len = txq->stats.len;
+		txinfo[count].head = txq->head;
+		txinfo[count].tail = txq->tail;
 		for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
-			tx_desc = txq->tx_desc_area + desc;
+			tx_desc = txq->txd + desc;
 			status = le32_to_cpu(tx_desc->status);
 
 			if (status & MWL8K_TXD_STATUS_FW_OWNED)
@@ -1108,30 +1258,26 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
 				txinfo[count].unused++;
 		}
 	}
-	spin_unlock_bh(&priv->tx_lock);
 
 	return ndescs;
 }
 
 /*
- * Must be called with hw->fw_mutex held and tx queues stopped.
+ * Must be called with priv->fw_mutex held and tx queues stopped.
  */
 static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
 {
 	struct mwl8k_priv *priv = hw->priv;
-	DECLARE_COMPLETION_ONSTACK(cmd_wait);
+	DECLARE_COMPLETION_ONSTACK(tx_wait);
 	u32 count;
 	unsigned long timeout;
 
 	might_sleep();
 
 	spin_lock_bh(&priv->tx_lock);
-	count = mwl8k_txq_busy(priv);
-	if (count) {
-		priv->tx_wait = &cmd_wait;
-		if (priv->radio_on)
-			mwl8k_tx_start(priv);
-	}
+	count = priv->pending_tx_pkts;
+	if (count)
+		priv->tx_wait = &tx_wait;
 	spin_unlock_bh(&priv->tx_lock);
 
 	if (count) {
@@ -1139,23 +1285,23 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
 		int index;
 		int newcount;
 
-		timeout = wait_for_completion_timeout(&cmd_wait,
+		timeout = wait_for_completion_timeout(&tx_wait,
 					msecs_to_jiffies(5000));
 		if (timeout)
 			return 0;
 
 		spin_lock_bh(&priv->tx_lock);
 		priv->tx_wait = NULL;
-		newcount = mwl8k_txq_busy(priv);
+		newcount = priv->pending_tx_pkts;
+		mwl8k_scan_tx_ring(priv, txinfo);
 		spin_unlock_bh(&priv->tx_lock);
 
 		printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n",
 		       __func__, __LINE__, count, newcount);
 
-		mwl8k_scan_tx_ring(priv, txinfo);
 		for (index = 0; index < MWL8K_TX_QUEUES; index++)
-			printk(KERN_ERR
-				"TXQ:%u L:%u H:%u T:%u FW:%u DRV:%u U:%u\n",
+			printk(KERN_ERR "TXQ:%u L:%u H:%u T:%u FW:%u "
+			       "DRV:%u U:%u\n",
 					index,
 					txinfo[index].len,
 					txinfo[index].head,
@@ -1181,7 +1327,7 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
 	struct mwl8k_tx_queue *txq = priv->txq + index;
 	int wake = 0;
 
-	while (txq->tx_stats.len > 0) {
+	while (txq->stats.len > 0) {
 		int tx;
 		struct mwl8k_tx_desc *tx_desc;
 		unsigned long addr;
@@ -1190,8 +1336,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
 		struct ieee80211_tx_info *info;
 		u32 status;
 
-		tx = txq->tx_head;
-		tx_desc = txq->tx_desc_area + tx;
+		tx = txq->head;
+		tx_desc = txq->txd + tx;
 
 		status = le32_to_cpu(tx_desc->status);
 
@@ -1202,15 +1348,15 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
 				~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
 		}
 
-		txq->tx_head = (tx + 1) % MWL8K_TX_DESCS;
-		BUG_ON(txq->tx_stats.len == 0);
-		txq->tx_stats.len--;
+		txq->head = (tx + 1) % MWL8K_TX_DESCS;
+		BUG_ON(txq->stats.len == 0);
+		txq->stats.len--;
 		priv->pending_tx_pkts--;
 
 		addr = le32_to_cpu(tx_desc->pkt_phys_addr);
 		size = le16_to_cpu(tx_desc->pkt_len);
-		skb = txq->tx_skb[tx];
-		txq->tx_skb[tx] = NULL;
+		skb = txq->skb[tx];
+		txq->skb[tx] = NULL;
 
 		BUG_ON(skb == NULL);
 		pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);
@@ -1243,13 +1389,13 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
 
 	mwl8k_txq_reclaim(hw, index, 1);
 
-	kfree(txq->tx_skb);
-	txq->tx_skb = NULL;
+	kfree(txq->skb);
+	txq->skb = NULL;
 
 	pci_free_consistent(priv->pdev,
 			    MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
-			    txq->tx_desc_area, txq->tx_desc_dma);
-	txq->tx_desc_area = NULL;
+			    txq->txd, txq->txd_dma);
+	txq->txd = NULL;
 }
 
 static int
@@ -1317,7 +1463,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 
 	if (pci_dma_mapping_error(priv->pdev, dma)) {
 		printk(KERN_DEBUG "%s: failed to dma map skb, "
-			"dropping TX frame.\n", priv->name);
+		       "dropping TX frame.\n", wiphy_name(hw->wiphy));
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -1326,10 +1472,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 
 	txq = priv->txq + index;
 
-	BUG_ON(txq->tx_skb[txq->tx_tail] != NULL);
-	txq->tx_skb[txq->tx_tail] = skb;
+	BUG_ON(txq->skb[txq->tail] != NULL);
+	txq->skb[txq->tail] = skb;
 
-	tx = txq->tx_desc_area + txq->tx_tail;
+	tx = txq->txd + txq->tail;
 	tx->data_rate = txdatarate;
 	tx->tx_priority = index;
 	tx->qos_control = cpu_to_le16(qos);
@@ -1340,15 +1486,15 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	wmb();
 	tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
 
-	txq->tx_stats.count++;
-	txq->tx_stats.len++;
+	txq->stats.count++;
+	txq->stats.len++;
 	priv->pending_tx_pkts++;
 
-	txq->tx_tail++;
-	if (txq->tx_tail == MWL8K_TX_DESCS)
-		txq->tx_tail = 0;
+	txq->tail++;
+	if (txq->tail == MWL8K_TX_DESCS)
+		txq->tail = 0;
 
-	if (txq->tx_head == txq->tx_tail)
+	if (txq->head == txq->tail)
 		ieee80211_stop_queue(hw, index);
 
 	mwl8k_tx_start(priv);
@@ -1431,7 +1577,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 	unsigned long timeout = 0;
 	u8 buf[32];
 
-	cmd->result = 0xFFFF;
+	cmd->result = 0xffff;
 	dma_size = le16_to_cpu(cmd->length);
 	dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
 				  PCI_DMA_BIDIRECTIONAL);
@@ -1464,7 +1610,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 
 	if (!timeout) {
 		printk(KERN_ERR "%s: Command %s timeout after %u ms\n",
-		       priv->name,
+		       wiphy_name(hw->wiphy),
 		       mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
 		       MWL8K_CMD_TIMEOUT_MS);
 		rc = -ETIMEDOUT;
@@ -1472,7 +1618,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 		rc = cmd->result ? -EINVAL : 0;
 		if (rc)
 			printk(KERN_ERR "%s: Command %s error 0x%x\n",
-			       priv->name,
+			       wiphy_name(hw->wiphy),
 			       mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
 			       le16_to_cpu(cmd->result));
 	}
@@ -1481,9 +1627,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 }
 
 /*
- * GET_HW_SPEC.
+ * CMD_GET_HW_SPEC (STA version).
  */
-struct mwl8k_cmd_get_hw_spec {
+struct mwl8k_cmd_get_hw_spec_sta {
 	struct mwl8k_cmd_pkt header;
 	__u8 hw_rev;
 	__u8 host_interface;
@@ -1499,13 +1645,13 @@ struct mwl8k_cmd_get_hw_spec {
 	__le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
 	__le32 caps2;
 	__le32 num_tx_desc_per_queue;
-	__le32 total_rx_desc;
+	__le32 total_rxd;
 } __attribute__((packed));
 
-static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
+static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
 {
 	struct mwl8k_priv *priv = hw->priv;
-	struct mwl8k_cmd_get_hw_spec *cmd;
+	struct mwl8k_cmd_get_hw_spec_sta *cmd;
 	int rc;
 	int i;
 
@@ -1518,12 +1664,12 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
 
 	memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
 	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
-	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
+	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
 	cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
 	for (i = 0; i < MWL8K_TX_QUEUES; i++)
-		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
+		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
 	cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
-	cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
+	cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
 
 	rc = mwl8k_post_cmd(hw, &cmd->header);
 
@@ -1539,6 +1685,129 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
 }
 
 /*
+ * CMD_GET_HW_SPEC (AP version).
+ */
+struct mwl8k_cmd_get_hw_spec_ap {
+	struct mwl8k_cmd_pkt header;
+	__u8 hw_rev;
+	__u8 host_interface;
+	__le16 num_wcb;
+	__le16 num_mcaddrs;
+	__u8 perm_addr[ETH_ALEN];
+	__le16 region_code;
+	__le16 num_antenna;
+	__le32 fw_rev;
+	__le32 wcbbase0;
+	__le32 rxwrptr;
+	__le32 rxrdptr;
+	__le32 ps_cookie;
+	__le32 wcbbase1;
+	__le32 wcbbase2;
+	__le32 wcbbase3;
+} __attribute__((packed));
+
+static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
+{
+	struct mwl8k_priv *priv = hw->priv;
+	struct mwl8k_cmd_get_hw_spec_ap *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+
+	memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
+	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+
+	if (!rc) {
+		int off;
+
+		SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
+		priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
+		priv->fw_rev = le32_to_cpu(cmd->fw_rev);
+		priv->hw_rev = cmd->hw_rev;
+
+		off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
+		iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
+
+		off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
+		iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off);
+
+		off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
+		iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off);
+
+		off = le32_to_cpu(cmd->wcbbase1) & 0xffff;
+		iowrite32(cpu_to_le32(priv->txq[1].txd_dma), priv->sram + off);
+
+		off = le32_to_cpu(cmd->wcbbase2) & 0xffff;
+		iowrite32(cpu_to_le32(priv->txq[2].txd_dma), priv->sram + off);
+
+		off = le32_to_cpu(cmd->wcbbase3) & 0xffff;
+		iowrite32(cpu_to_le32(priv->txq[3].txd_dma), priv->sram + off);
+	}
+
+	kfree(cmd);
+	return rc;
+}
+
+/*
+ * CMD_SET_HW_SPEC.
+ */
+struct mwl8k_cmd_set_hw_spec {
+	struct mwl8k_cmd_pkt header;
+	__u8 hw_rev;
+	__u8 host_interface;
+	__le16 num_mcaddrs;
+	__u8 perm_addr[ETH_ALEN];
+	__le16 region_code;
+	__le32 fw_rev;
+	__le32 ps_cookie;
+	__le32 caps;
+	__le32 rx_queue_ptr;
+	__le32 num_tx_queues;
+	__le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
+	__le32 flags;
+	__le32 num_tx_desc_per_queue;
+	__le32 total_rxd;
+} __attribute__((packed));
+
+#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT	0x00000080
+
+static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
+{
+	struct mwl8k_priv *priv = hw->priv;
+	struct mwl8k_cmd_set_hw_spec *cmd;
+	int rc;
+	int i;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_HW_SPEC);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+
+	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
+	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
+	cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
+	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
+	cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
+	cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
+	cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+	kfree(cmd);
+
+	return rc;
+}
+
+/*
  * CMD_MAC_MULTICAST_ADR.
  */
 struct mwl8k_cmd_mac_multicast_adr {
@@ -1548,19 +1817,23 @@ struct mwl8k_cmd_mac_multicast_adr {
 	__u8 addr[0][ETH_ALEN];
 };
 
-#define MWL8K_ENABLE_RX_MULTICAST 0x000F
+#define MWL8K_ENABLE_RX_DIRECTED	0x0001
+#define MWL8K_ENABLE_RX_MULTICAST	0x0002
+#define MWL8K_ENABLE_RX_ALL_MULTICAST	0x0004
+#define MWL8K_ENABLE_RX_BROADCAST	0x0008
 
 static struct mwl8k_cmd_pkt *
-__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
+__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
 			      int mc_count, struct dev_addr_list *mclist)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	struct mwl8k_cmd_mac_multicast_adr *cmd;
 	int size;
-	int i;
 
-	if (mc_count > priv->num_mcaddrs)
-		mc_count = priv->num_mcaddrs;
+	if (allmulti || mc_count > priv->num_mcaddrs) {
+		allmulti = 1;
+		mc_count = 0;
+	}
 
 	size = sizeof(*cmd) + mc_count * ETH_ALEN;
 
@@ -1570,16 +1843,24 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
 
 	cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
 	cmd->header.length = cpu_to_le16(size);
-	cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
-	cmd->numaddr = cpu_to_le16(mc_count);
-
-	for (i = 0; i < mc_count && mclist; i++) {
-		if (mclist->da_addrlen != ETH_ALEN) {
-			kfree(cmd);
-			return NULL;
+	cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_DIRECTED |
+				  MWL8K_ENABLE_RX_BROADCAST);
+
+	if (allmulti) {
+		cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
+	} else if (mc_count) {
+		int i;
+
+		cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
+		cmd->numaddr = cpu_to_le16(mc_count);
+		for (i = 0; i < mc_count && mclist; i++) {
+			if (mclist->da_addrlen != ETH_ALEN) {
+				kfree(cmd);
+				return NULL;
+			}
+			memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
+			mclist = mclist->next;
 		}
-		memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
-		mclist = mclist->next;
 	}
 
 	return &cmd->header;
@@ -1590,7 +1871,6 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
  */
 struct mwl8k_cmd_802_11_get_stat {
 	struct mwl8k_cmd_pkt header;
-	__le16 action;
 	__le32 stats[64];
 } __attribute__((packed));
 
@@ -1611,7 +1891,6 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
 
 	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
 	cmd->header.length = cpu_to_le16(sizeof(*cmd));
-	cmd->action = cpu_to_le16(MWL8K_CMD_GET);
 
 	rc = mwl8k_post_cmd(hw, &cmd->header);
 	if (!rc) {
@@ -1727,6 +2006,39 @@ static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
 }
 
 /*
+ * CMD_RF_ANTENNA.
+ */
+struct mwl8k_cmd_rf_antenna {
+	struct mwl8k_cmd_pkt header;
+	__le16 antenna;
+	__le16 mode;
+} __attribute__((packed));
+
+#define MWL8K_RF_ANTENNA_RX		1
+#define MWL8K_RF_ANTENNA_TX		2
+
+static int
+mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
+{
+	struct mwl8k_cmd_rf_antenna *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_ANTENNA);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+	cmd->antenna = cpu_to_le16(antenna);
+	cmd->mode = cpu_to_le16(mask);
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+	kfree(cmd);
+
+	return rc;
+}
+
+/*
  * CMD_SET_PRE_SCAN.
  */
 struct mwl8k_cmd_set_pre_scan {
@@ -1904,6 +2216,46 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
 }
 
 /*
+ * CMD_SET_MAC_ADDR.
+ */
+struct mwl8k_cmd_set_mac_addr {
+	struct mwl8k_cmd_pkt header;
+	union {
+		struct {
+			__le16 mac_type;
+			__u8 mac_addr[ETH_ALEN];
+		} mbss;
+		__u8 mac_addr[ETH_ALEN];
+	};
+} __attribute__((packed));
+
+static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
+{
+	struct mwl8k_priv *priv = hw->priv;
+	struct mwl8k_cmd_set_mac_addr *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+	if (priv->ap_fw) {
+		cmd->mbss.mac_type = 0;
+		memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
+	} else {
+		memcpy(cmd->mac_addr, mac, ETH_ALEN);
+	}
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+	kfree(cmd);
+
+	return rc;
+}
+
+
+/*
  * CMD_SET_RATEADAPT_MODE.
  */
 struct mwl8k_cmd_set_rate_adapt_mode {
@@ -2005,17 +2357,34 @@ struct mwl8k_cmd_set_edca_params {
 	/* TX opportunity in units of 32 us */
 	__le16 txop;
 
-	/* Log exponent of max contention period: 0...15*/
-	__u8 log_cw_max;
+	union {
+		struct {
+			/* Log exponent of max contention period: 0...15 */
+			__le32 log_cw_max;
+
+			/* Log exponent of min contention period: 0...15 */
+			__le32 log_cw_min;
+
+			/* Adaptive interframe spacing in units of 32us */
+			__u8 aifs;
+
+			/* TX queue to configure */
+			__u8 txq;
+		} ap;
+		struct {
+			/* Log exponent of max contention period: 0...15 */
+			__u8 log_cw_max;
 
-	/* Log exponent of min contention period: 0...15 */
-	__u8 log_cw_min;
+			/* Log exponent of min contention period: 0...15 */
+			__u8 log_cw_min;
 
-	/* Adaptive interframe spacing in units of 32us */
-	__u8 aifs;
+			/* Adaptive interframe spacing in units of 32us */
+			__u8 aifs;
 
-	/* TX queue to configure */
-	__u8 txq;
+			/* TX queue to configure */
+			__u8 txq;
+		} sta;
+	};
 } __attribute__((packed));
 
 #define MWL8K_SET_EDCA_CW	0x01
@@ -2031,6 +2400,7 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
 		__u16 cw_min, __u16 cw_max,
 		__u8 aifs, __u16 txop)
 {
+	struct mwl8k_priv *priv = hw->priv;
 	struct mwl8k_cmd_set_edca_params *cmd;
 	int rc;
 
@@ -2038,14 +2408,27 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
 	if (cmd == NULL)
 		return -ENOMEM;
 
+	/*
+	 * Queues 0 (BE) and 1 (BK) are swapped in hardware for
+	 * this call.
+	 */
+	qnum ^= !(qnum >> 1);
+
 	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
 	cmd->header.length = cpu_to_le16(sizeof(*cmd));
 	cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
 	cmd->txop = cpu_to_le16(txop);
-	cmd->log_cw_max = (u8)ilog2(cw_max + 1);
-	cmd->log_cw_min = (u8)ilog2(cw_min + 1);
-	cmd->aifs = aifs;
-	cmd->txq = qnum;
+	if (priv->ap_fw) {
+		cmd->ap.log_cw_max = cpu_to_le32(ilog2(cw_max + 1));
+		cmd->ap.log_cw_min = cpu_to_le32(ilog2(cw_min + 1));
+		cmd->ap.aifs = aifs;
+		cmd->ap.txq = qnum;
+	} else {
+		cmd->sta.log_cw_max = (u8)ilog2(cw_max + 1);
+		cmd->sta.log_cw_min = (u8)ilog2(cw_min + 1);
+		cmd->sta.aifs = aifs;
+		cmd->sta.txq = qnum;
+	}
 
 	rc = mwl8k_post_cmd(hw, &cmd->header);
 	kfree(cmd);
@@ -2093,8 +2476,8 @@ static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
 	/* XXX TBD Might just have to abort and return an error */
 	if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
 		printk(KERN_ERR "%s(): WARNING: Incomplete beacon "
-			"sent to firmware. Sz=%u MAX=%u\n", __func__,
-			payload_len, MWL8K_FJ_BEACON_MAXLEN);
+		       "sent to firmware. Sz=%u MAX=%u\n", __func__,
+		       payload_len, MWL8K_FJ_BEACON_MAXLEN);
 
 	if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
 		payload_len = MWL8K_FJ_BEACON_MAXLEN;
@@ -2341,9 +2724,10 @@ static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
 	cmd->rate_type = cpu_to_le32(rate_type);
 
 	if (rate_table != NULL) {
-		/* Copy over each field manually so
-		* that bitflipping can be done
-		*/
+		/*
+		 * Copy over each field manually so that endian
+		 * conversion can be done.
+		 */
 		cmd->rate_table.allow_rate_drop =
 				cpu_to_le32(rate_table->allow_rate_drop);
 		cmd->rate_table.num_rates =
@@ -2399,7 +2783,7 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
 
 	if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
 		if (!mutex_is_locked(&priv->fw_mutex) &&
-		    priv->radio_on && mwl8k_txq_busy(priv))
+		    priv->radio_on && priv->pending_tx_pkts)
 			mwl8k_tx_start(priv);
 	}
 
@@ -2418,7 +2802,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	if (priv->current_channel == NULL) {
 		printk(KERN_DEBUG "%s: dropped TX frame since radio "
-		       "disabled\n", priv->name);
+		       "disabled\n", wiphy_name(hw->wiphy));
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -2433,11 +2817,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
 	struct mwl8k_priv *priv = hw->priv;
 	int rc;
 
-	rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
+	rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
 			 IRQF_SHARED, MWL8K_NAME, hw);
 	if (rc) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
-		       priv->name);
+		       wiphy_name(hw->wiphy));
 		return -EIO;
 	}
 
@@ -2451,12 +2835,17 @@ static int mwl8k_start(struct ieee80211_hw *hw)
 	if (!rc) {
 		rc = mwl8k_cmd_802_11_radio_enable(hw);
 
-		if (!rc)
-			rc = mwl8k_cmd_set_pre_scan(hw);
+		if (!priv->ap_fw) {
+			if (!rc)
+				rc = mwl8k_enable_sniffer(hw, 0);
 
-		if (!rc)
-			rc = mwl8k_cmd_set_post_scan(hw,
-					"\x00\x00\x00\x00\x00\x00");
+			if (!rc)
+				rc = mwl8k_cmd_set_pre_scan(hw);
+
+			if (!rc)
+				rc = mwl8k_cmd_set_post_scan(hw,
+						"\x00\x00\x00\x00\x00\x00");
+		}
 
 		if (!rc)
 			rc = mwl8k_cmd_setrateadaptmode(hw, 0);
@@ -2464,9 +2853,6 @@ static int mwl8k_start(struct ieee80211_hw *hw)
 		if (!rc)
 			rc = mwl8k_set_wmm(hw, 0);
 
-		if (!rc)
-			rc = mwl8k_enable_sniffer(hw, 0);
-
 		mwl8k_fw_unlock(hw);
 	}
 
@@ -2500,9 +2886,6 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
 	/* Stop tx reclaim tasklet */
 	tasklet_disable(&priv->tx_reclaim_task);
 
-	/* Stop config thread */
-	flush_workqueue(priv->config_wq);
-
 	/* Return all skbs to mac80211 */
 	for (i = 0; i < MWL8K_TX_QUEUES; i++)
 		mwl8k_txq_reclaim(hw, i, 1);
@@ -2526,11 +2909,24 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
 	if (conf->type != NL80211_IFTYPE_STATION)
 		return -EINVAL;
 
+	/*
+	 * Reject interface creation if sniffer mode is active, as
+	 * STA operation is mutually exclusive with hardware sniffer
+	 * mode.
+	 */
+	if (priv->sniffer_enabled) {
+		printk(KERN_INFO "%s: unable to create STA "
+		       "interface due to sniffer mode being enabled\n",
+		       wiphy_name(hw->wiphy));
+		return -EINVAL;
+	}
+
 	/* Clean out driver private area */
 	mwl8k_vif = MWL8K_VIF(conf->vif);
 	memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
 
-	/* Save the mac address */
+	/* Set and save the mac address */
+	mwl8k_set_mac_addr(hw, conf->mac_addr);
 	memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
 
 	/* Back pointer to parent config block */
@@ -2558,6 +2954,8 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
 	if (priv->vif == NULL)
 		return;
 
+	mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+
 	priv->vif = NULL;
 }
 
@@ -2593,8 +2991,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
 	if (rc)
 		goto out;
 
-	if (mwl8k_cmd_mimo_config(hw, 0x7, 0x7))
-		rc = -EINVAL;
+	if (priv->ap_fw) {
+		rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
+		if (!rc)
+			rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+	} else {
+		rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
+	}
 
 out:
 	mwl8k_fw_unlock(hw);
@@ -2681,32 +3084,108 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
 {
 	struct mwl8k_cmd_pkt *cmd;
 
-	cmd = __mwl8k_cmd_mac_multicast_adr(hw, mc_count, mclist);
+	/*
+	 * Synthesize and return a command packet that programs the
+	 * hardware multicast address filter.  At this point we don't
+	 * know whether FIF_ALLMULTI is being requested, but if it is,
+	 * we'll end up throwing this packet away and creating a new
+	 * one in mwl8k_configure_filter().
+	 */
+	cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
 
 	return (unsigned long)cmd;
 }
 
+static int
+mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
+			       unsigned int changed_flags,
+			       unsigned int *total_flags)
+{
+	struct mwl8k_priv *priv = hw->priv;
+
+	/*
+	 * Hardware sniffer mode is mutually exclusive with STA
+	 * operation, so refuse to enable sniffer mode if a STA
+	 * interface is active.
+	 */
+	if (priv->vif != NULL) {
+		if (net_ratelimit())
+			printk(KERN_INFO "%s: not enabling sniffer "
+			       "mode because STA interface is active\n",
+			       wiphy_name(hw->wiphy));
+		return 0;
+	}
+
+	if (!priv->sniffer_enabled) {
+		if (mwl8k_enable_sniffer(hw, 1))
+			return 0;
+		priv->sniffer_enabled = true;
+	}
+
+	*total_flags &=	FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
+			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
+			FIF_OTHER_BSS;
+
+	return 1;
+}
+
 static void mwl8k_configure_filter(struct ieee80211_hw *hw,
 				   unsigned int changed_flags,
 				   unsigned int *total_flags,
 				   u64 multicast)
 {
 	struct mwl8k_priv *priv = hw->priv;
-	struct mwl8k_cmd_pkt *multicast_adr_cmd;
+	struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;
+
+	/*
+	 * AP firmware doesn't allow fine-grained control over
+	 * the receive filter.
+	 */
+	if (priv->ap_fw) {
+		*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
+		kfree(cmd);
+		return;
+	}
+
+	/*
+	 * Enable hardware sniffer mode if FIF_CONTROL or
+	 * FIF_OTHER_BSS is requested.
+	 */
+	if (*total_flags & (FIF_CONTROL | FIF_OTHER_BSS) &&
+	    mwl8k_configure_filter_sniffer(hw, changed_flags, total_flags)) {
+		kfree(cmd);
+		return;
+	}
 
 	/* Clear unsupported feature flags */
-	*total_flags &= FIF_BCN_PRBRESP_PROMISC;
+	*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
 
 	if (mwl8k_fw_lock(hw))
 		return;
 
+	if (priv->sniffer_enabled) {
+		mwl8k_enable_sniffer(hw, 0);
+		priv->sniffer_enabled = false;
+	}
+
 	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
-		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+		if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
+			/*
+			 * Disable the BSS filter.
+			 */
 			mwl8k_cmd_set_pre_scan(hw);
-		else {
+		} else {
 			u8 *bssid;
 
-			bssid = "\x00\x00\x00\x00\x00\x00";
+			/*
+			 * Enable the BSS filter.
+			 *
+			 * If there is an active STA interface, use that
+			 * interface's BSSID, otherwise use a dummy one
+			 * (where the OUI part needs to be nonzero for
+			 * the BSSID to be accepted by POST_SCAN).
+			 */
+			bssid = "\x01\x00\x00\x00\x00\x00";
 			if (priv->vif != NULL)
 				bssid = MWL8K_VIF(priv->vif)->bssid;
 
@@ -2714,10 +3193,20 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
 		}
 	}
 
-	multicast_adr_cmd = (void *)(unsigned long)multicast;
-	if (multicast_adr_cmd != NULL) {
-		mwl8k_post_cmd(hw, multicast_adr_cmd);
-		kfree(multicast_adr_cmd);
+	/*
+	 * If FIF_ALLMULTI is being requested, throw away the command
+	 * packet that ->prepare_multicast() built and replace it with
+	 * a command packet that enables reception of all multicast
+	 * packets.
+	 */
+	if (*total_flags & FIF_ALLMULTI) {
+		kfree(cmd);
+		cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
+	}
+
+	if (cmd != NULL) {
+		mwl8k_post_cmd(hw, cmd);
+		kfree(cmd);
 	}
 
 	mwl8k_fw_unlock(hw);
@@ -2762,7 +3251,7 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
 	spin_lock_bh(&priv->tx_lock);
 	for (index = 0; index < MWL8K_TX_QUEUES; index++) {
 		txq = priv->txq + index;
-		memcpy(&stats[index], &txq->tx_stats,
+		memcpy(&stats[index], &txq->stats,
 			sizeof(struct ieee80211_tx_queue_stats));
 	}
 	spin_unlock_bh(&priv->tx_lock);
@@ -2802,7 +3291,7 @@ static void mwl8k_tx_reclaim_handler(unsigned long data)
 	for (i = 0; i < MWL8K_TX_QUEUES; i++)
 		mwl8k_txq_reclaim(hw, i, 0);
 
-	if (priv->tx_wait != NULL && mwl8k_txq_busy(priv) == 0) {
+	if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
 		complete(priv->tx_wait);
 		priv->tx_wait = NULL;
 	}
@@ -2822,6 +3311,36 @@ static void mwl8k_finalize_join_worker(struct work_struct *work)
 	priv->beacon_skb = NULL;
 }
 
+enum {
+	MWL8687 = 0,
+	MWL8366,
+};
+
+static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
+	{
+		.part_name	= "88w8687",
+		.helper_image	= "mwl8k/helper_8687.fw",
+		.fw_image	= "mwl8k/fmimage_8687.fw",
+		.rxd_ops	= &rxd_8687_ops,
+		.modes		= BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.part_name	= "88w8366",
+		.helper_image	= "mwl8k/helper_8366.fw",
+		.fw_image	= "mwl8k/fmimage_8366.fw",
+		.rxd_ops	= &rxd_8366_ops,
+		.modes		= 0,
+	},
+};
+
+static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+	{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
+	{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
+	{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+	{ },
+};
+MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
+
 static int __devinit mwl8k_probe(struct pci_dev *pdev,
 				 const struct pci_device_id *id)
 {
@@ -2862,17 +3381,34 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 	priv = hw->priv;
 	priv->hw = hw;
 	priv->pdev = pdev;
+	priv->device_info = &mwl8k_info_tbl[id->driver_data];
+	priv->rxd_ops = priv->device_info->rxd_ops;
+	priv->sniffer_enabled = false;
 	priv->wmm_enabled = false;
 	priv->pending_tx_pkts = 0;
-	strncpy(priv->name, MWL8K_NAME, sizeof(priv->name));
 
 	SET_IEEE80211_DEV(hw, &pdev->dev);
 	pci_set_drvdata(pdev, hw);
 
+	priv->sram = pci_iomap(pdev, 0, 0x10000);
+	if (priv->sram == NULL) {
+		printk(KERN_ERR "%s: Cannot map device SRAM\n",
+		       wiphy_name(hw->wiphy));
+		goto err_iounmap;
+	}
+
+	/*
+	 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
+	 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
+	 */
 	priv->regs = pci_iomap(pdev, 1, 0x10000);
 	if (priv->regs == NULL) {
-		printk(KERN_ERR "%s: Cannot map device memory\n", priv->name);
-		goto err_iounmap;
+		priv->regs = pci_iomap(pdev, 2, 0x10000);
+		if (priv->regs == NULL) {
+			printk(KERN_ERR "%s: Cannot map device registers\n",
+			       wiphy_name(hw->wiphy));
+			goto err_iounmap;
+		}
 	}
 
 	memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
@@ -2897,7 +3433,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 
 	hw->queues = MWL8K_TX_QUEUES;
 
-	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+	hw->wiphy->interface_modes = priv->device_info->modes;
 
 	/* Set rssi and noise values to dBm */
 	hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
@@ -2916,11 +3452,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 			mwl8k_tx_reclaim_handler, (unsigned long)hw);
 	tasklet_disable(&priv->tx_reclaim_task);
 
-	/* Config workthread */
-	priv->config_wq = create_singlethread_workqueue("mwl8k_config");
-	if (priv->config_wq == NULL)
-		goto err_iounmap;
-
 	/* Power management cookie */
 	priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
 	if (priv->cookie == NULL)
@@ -2934,11 +3465,12 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 	mutex_init(&priv->fw_mutex);
 	priv->fw_mutex_owner = NULL;
 	priv->fw_mutex_depth = 0;
-	priv->tx_wait = NULL;
 	priv->hostcmd_wait = NULL;
 
 	spin_lock_init(&priv->tx_lock);
 
+	priv->tx_wait = NULL;
+
 	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
 		rc = mwl8k_txq_init(hw, i);
 		if (rc)
@@ -2950,11 +3482,11 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
 	iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
 
-	rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
+	rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
 			 IRQF_SHARED, MWL8K_NAME, hw);
 	if (rc) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
-		       priv->name);
+		       wiphy_name(hw->wiphy));
 		goto err_free_queues;
 	}
 
@@ -2962,16 +3494,18 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 	mwl8k_hw_reset(priv);
 
 	/* Ask userland hotplug daemon for the device firmware */
-	rc = mwl8k_request_firmware(priv, (u32)id->driver_data);
+	rc = mwl8k_request_firmware(priv);
 	if (rc) {
-		printk(KERN_ERR "%s: Firmware files not found\n", priv->name);
+		printk(KERN_ERR "%s: Firmware files not found\n",
+		       wiphy_name(hw->wiphy));
 		goto err_free_irq;
 	}
 
 	/* Load firmware into hardware */
-	rc = mwl8k_load_firmware(priv);
+	rc = mwl8k_load_firmware(hw);
 	if (rc) {
-		printk(KERN_ERR "%s: Cannot start firmware\n", priv->name);
+		printk(KERN_ERR "%s: Cannot start firmware\n",
+		       wiphy_name(hw->wiphy));
 		goto err_stop_firmware;
 	}
 
@@ -2986,16 +3520,31 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 	iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
 
 	/* Get config data, mac addrs etc */
-	rc = mwl8k_cmd_get_hw_spec(hw);
+	if (priv->ap_fw) {
+		rc = mwl8k_cmd_get_hw_spec_ap(hw);
+		if (!rc)
+			rc = mwl8k_cmd_set_hw_spec(hw);
+	} else {
+		rc = mwl8k_cmd_get_hw_spec_sta(hw);
+	}
 	if (rc) {
-		printk(KERN_ERR "%s: Cannot initialise firmware\n", priv->name);
+		printk(KERN_ERR "%s: Cannot initialise firmware\n",
+		       wiphy_name(hw->wiphy));
 		goto err_stop_firmware;
 	}
 
 	/* Turn radio off */
 	rc = mwl8k_cmd_802_11_radio_disable(hw);
 	if (rc) {
-		printk(KERN_ERR "%s: Cannot disable\n", priv->name);
+		printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
+		goto err_stop_firmware;
+	}
+
+	/* Clear MAC address */
+	rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+	if (rc) {
+		printk(KERN_ERR "%s: Cannot clear MAC address\n",
+		       wiphy_name(hw->wiphy));
 		goto err_stop_firmware;
 	}
 
@@ -3005,13 +3554,15 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 
 	rc = ieee80211_register_hw(hw);
 	if (rc) {
-		printk(KERN_ERR "%s: Cannot register device\n", priv->name);
+		printk(KERN_ERR "%s: Cannot register device\n",
+		       wiphy_name(hw->wiphy));
 		goto err_stop_firmware;
 	}
 
-	printk(KERN_INFO "%s: 88w%u v%d, %pM, firmware version %u.%u.%u.%u\n",
-	       wiphy_name(hw->wiphy), priv->part_num, priv->hw_rev,
-	       hw->wiphy->perm_addr,
+	printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
+	       wiphy_name(hw->wiphy), priv->device_info->part_name,
+	       priv->hw_rev, hw->wiphy->perm_addr,
+	       priv->ap_fw ? "AP" : "STA",
 	       (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff,
 	       (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff);
 
@@ -3038,8 +3589,8 @@ err_iounmap:
 	if (priv->regs != NULL)
 		pci_iounmap(pdev, priv->regs);
 
-	if (priv->config_wq != NULL)
-		destroy_workqueue(priv->config_wq);
+	if (priv->sram != NULL)
+		pci_iounmap(pdev, priv->sram);
 
 	pci_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(hw);
@@ -3073,9 +3624,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
 	/* Remove tx reclaim tasklet */
 	tasklet_kill(&priv->tx_reclaim_task);
 
-	/* Stop config thread */
-	destroy_workqueue(priv->config_wq);
-
 	/* Stop hardware */
 	mwl8k_hw_reset(priv);
 
@@ -3088,10 +3636,10 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
 
 	mwl8k_rxq_deinit(hw, 0);
 
-	pci_free_consistent(priv->pdev, 4,
-				priv->cookie, priv->cookie_dma);
+	pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
 
 	pci_iounmap(pdev, priv->regs);
+	pci_iounmap(pdev, priv->sram);
 	pci_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(hw);
 	pci_release_regions(pdev);
@@ -3100,7 +3648,7 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
 
 static struct pci_driver mwl8k_driver = {
 	.name		= MWL8K_NAME,
-	.id_table	= mwl8k_table,
+	.id_table	= mwl8k_pci_id_table,
 	.probe		= mwl8k_probe,
 	.remove		= __devexit_p(mwl8k_remove),
 	.shutdown	= __devexit_p(mwl8k_shutdown),
@@ -3118,3 +3666,8 @@ static void __exit mwl8k_exit(void)
 
 module_init(mwl8k_init);
 module_exit(mwl8k_exit);
+
+MODULE_DESCRIPTION(MWL8K_DESC);
+MODULE_VERSION(MWL8K_VERSION);
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 83b635fd7784..e2a2c18920aa 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -1,8 +1,10 @@
 config HERMES
 	tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
-	depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
-	depends on CFG80211
+	depends on (PPC_PMAC || PCI || PCMCIA)
+	depends on CFG80211 && CFG80211_WEXT
 	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
 	select FW_LOADER
 	select CRYPTO
 	select CRYPTO_MICHAEL_MIC
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 1257250a1e22..cfa72962052b 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -28,6 +28,12 @@ static const struct fw_info orinoco_fw[] = {
 	{ NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
 	{ "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
 };
+MODULE_FIRMWARE("agere_sta_fw.bin");
+MODULE_FIRMWARE("agere_ap_fw.bin");
+MODULE_FIRMWARE("prism_sta_fw.bin");
+MODULE_FIRMWARE("prism_ap_fw.bin");
+MODULE_FIRMWARE("symbol_sp24t_prim_fw");
+MODULE_FIRMWARE("symbol_sp24t_sec_fw");
 
 /* Structure used to access fields in FW
  * Make sure LE decoding macros are used
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 359652d35e63..404830f47ab2 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -60,8 +60,15 @@ static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
 /* Set priv->firmware type, determine firmware properties
  * This function can be called before we have registered with netdev,
  * so all errors go out with dev_* rather than printk
+ *
+ * If fw_name is non-NULL, stores a firmware description in it.
+ * If hw_ver is non-NULL, stores a hardware version in it.
+ *
+ * Both are reported via the generic cfg80211 ethtool support.
  */
-int determine_fw_capabilities(struct orinoco_private *priv)
+int determine_fw_capabilities(struct orinoco_private *priv,
+			      char *fw_name, size_t fw_name_len,
+			      u32 *hw_ver)
 {
 	struct device *dev = priv->dev;
 	hermes_t *hw = &priv->hw;
@@ -85,6 +92,12 @@ int determine_fw_capabilities(struct orinoco_private *priv)
 	dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
 		 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);
 
+	if (hw_ver)
+		*hw_ver = (((nic_id.id & 0xff) << 24) |
+			   ((nic_id.variant & 0xff) << 16) |
+			   ((nic_id.major & 0xff) << 8) |
+			   (nic_id.minor & 0xff));
+
 	priv->firmware_type = determine_firmware_type(&nic_id);
 
 	/* Get the firmware version */
@@ -135,8 +148,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
 	case FIRMWARE_TYPE_AGERE:
 		/* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
 		   ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
-		snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
-			 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor);
+		if (fw_name)
+			snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
+				 sta_id.major, sta_id.minor);
 
 		firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
 
@@ -185,8 +199,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
 			tmp[SYMBOL_MAX_VER_LEN] = '\0';
 		}
 
-		snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
-			 "Symbol %s", tmp);
+		if (fw_name)
+			snprintf(fw_name, fw_name_len, "Symbol %s", tmp);
 
 		priv->has_ibss = (firmver >= 0x20000);
 		priv->has_wep = (firmver >= 0x15012);
@@ -224,9 +238,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
 		 * different and less well tested */
 		/* D-Link MAC : 00:40:05:* */
 		/* Addtron MAC : 00:90:D1:* */
-		snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
-			 "Intersil %d.%d.%d", sta_id.major, sta_id.minor,
-			 sta_id.variant);
+		if (fw_name)
+			snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
+				 sta_id.major, sta_id.minor, sta_id.variant);
 
 		firmver = ((unsigned long)sta_id.major << 16) |
 			((unsigned long)sta_id.minor << 8) | sta_id.variant;
@@ -245,7 +259,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
 		}
 		break;
 	}
-	dev_info(dev, "Firmware determined as %s\n", priv->fw_name);
+	if (fw_name)
+		dev_info(dev, "Firmware determined as %s\n", fw_name);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8df6e8752be6..e2f7fdc4d45a 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -24,7 +24,8 @@
 struct orinoco_private;
 struct dev_addr_list;
 
-int determine_fw_capabilities(struct orinoco_private *priv);
+int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
+			      size_t fw_name_len, u32 *hw_ver);
 int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr);
 int orinoco_hw_allocate_fid(struct orinoco_private *priv);
 int orinoco_get_bitratemode(int bitrate, int automatic);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 7a32bcb0c037..753a1804eee7 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -83,7 +83,6 @@
 #include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/ethtool.h>
 #include <linux/suspend.h>
 #include <linux/if_arp.h>
 #include <linux/wireless.h>
@@ -162,8 +161,6 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
 				 | HERMES_EV_WTERR | HERMES_EV_INFO \
 				 | HERMES_EV_INFDROP)
 
-static const struct ethtool_ops orinoco_ethtool_ops;
-
 /********************************************************************/
 /* Data types                                                       */
 /********************************************************************/
@@ -1994,7 +1991,9 @@ int orinoco_init(struct orinoco_private *priv)
 		goto out;
 	}
 
-	err = determine_fw_capabilities(priv);
+	err = determine_fw_capabilities(priv, wiphy->fw_version,
+					sizeof(wiphy->fw_version),
+					&wiphy->hw_version);
 	if (err != 0) {
 		dev_err(dev, "Incompatible firmware, aborting\n");
 		goto out;
@@ -2010,7 +2009,9 @@ int orinoco_init(struct orinoco_private *priv)
 			priv->do_fw_download = 0;
 
 		/* Check firmware version again */
-		err = determine_fw_capabilities(priv);
+		err = determine_fw_capabilities(priv, wiphy->fw_version,
+						sizeof(wiphy->fw_version),
+						&wiphy->hw_version);
 		if (err != 0) {
 			dev_err(dev, "Incompatible firmware, aborting\n");
 			goto out;
@@ -2212,7 +2213,6 @@ int orinoco_if_add(struct orinoco_private *priv,
 	dev->ieee80211_ptr = wdev;
 	dev->netdev_ops = &orinoco_netdev_ops;
 	dev->watchdog_timeo = HZ; /* 1 second timeout */
-	dev->ethtool_ops = &orinoco_ethtool_ops;
 	dev->wireless_handlers = &orinoco_handler_def;
 #ifdef WIRELESS_SPY
 	dev->wireless_data = &priv->wireless_data;
@@ -2225,6 +2225,7 @@ int orinoco_if_add(struct orinoco_private *priv,
 	netif_carrier_off(dev);
 
 	memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
+	memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
 
 	dev->base_addr = base_addr;
 	dev->irq = irq;
@@ -2348,27 +2349,6 @@ void orinoco_down(struct orinoco_private *priv)
 }
 EXPORT_SYMBOL(orinoco_down);
 
-static void orinoco_get_drvinfo(struct net_device *dev,
-				struct ethtool_drvinfo *info)
-{
-	struct orinoco_private *priv = ndev_priv(dev);
-
-	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
-	strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
-	strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
-	if (dev->dev.parent)
-		strncpy(info->bus_info, dev_name(dev->dev.parent),
-			sizeof(info->bus_info) - 1);
-	else
-		snprintf(info->bus_info, sizeof(info->bus_info) - 1,
-			 "PCMCIA %p", priv->hw.iobase);
-}
-
-static const struct ethtool_ops orinoco_ethtool_ops = {
-	.get_drvinfo = orinoco_get_drvinfo,
-	.get_link = ethtool_op_get_link,
-};
-
 /********************************************************************/
 /* Module initialization                                            */
 /********************************************************************/
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 9ac6f1dda4b0..665ef56f8382 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -93,7 +93,6 @@ struct orinoco_private {
 
 	/* Capabilities of the hardware/firmware */
 	fwtype_t firmware_type;
-	char fw_name[32];
 	int ibss_port;
 	int nicbuf_size;
 	u16 channel_mask;
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index b45d6a4ed1e8..b0342a520bf1 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,6 +1,6 @@
 config P54_COMMON
 	tristate "Softmac Prism54 support"
-	depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
+	depends on MAC80211 && EXPERIMENTAL
 	select FW_LOADER
 	---help---
 	  This is common code for isl38xx/stlc45xx based modules.
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 0efe67deedee..8e3818f6832e 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -126,7 +126,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
 	int ret = -ENOMEM;
 
 	if ((!list->entries) || (!list->band_channel_num[band]))
-		return 0;
+		return -EINVAL;
 
 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 	if (!tmp)
@@ -158,6 +158,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
 			       (list->channels[i].data & CHAN_HAS_CURVE ? "" :
 				" [curve data]"),
 			       list->channels[i].index, list->channels[i].freq);
+			continue;
 		}
 
 		tmp->channels[j].band = list->channels[i].band;
@@ -165,7 +166,16 @@ static int p54_generate_band(struct ieee80211_hw *dev,
 		j++;
 	}
 
-	tmp->n_channels = list->band_channel_num[band];
+	if (j == 0) {
+		printk(KERN_ERR "%s: Disabling totally damaged %s band.\n",
+		       wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ?
+		       "2 GHz" : "5 GHz");
+
+		ret = -ENODATA;
+		goto err_out;
+	}
+
+	tmp->n_channels = j;
 	old = priv->band_table[band];
 	priv->band_table[band] = tmp;
 	if (old) {
@@ -228,13 +238,13 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
 	struct p54_common *priv = dev->priv;
 	struct p54_channel_list *list;
 	unsigned int i, j, max_channel_num;
-	int ret = -ENOMEM;
+	int ret = 0;
 	u16 freq;
 
 	if ((priv->iq_autocal_len != priv->curve_data->entries) ||
 	    (priv->iq_autocal_len != priv->output_limit->entries))
-		printk(KERN_ERR "%s: EEPROM is damaged... you may not be able"
-				"to use all channels with this device.\n",
+		printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. "
+				"You may not be able to use all channels.\n",
 				wiphy_name(dev->wiphy));
 
 	max_channel_num = max_t(unsigned int, priv->output_limit->entries,
@@ -243,8 +253,10 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
 				priv->curve_data->entries);
 
 	list = kzalloc(sizeof(*list), GFP_KERNEL);
-	if (!list)
+	if (!list) {
+		ret = -ENOMEM;
 		goto free;
+	}
 
 	list->max_entries = max_channel_num;
 	list->channels = kzalloc(sizeof(struct p54_channel_entry) *
@@ -282,13 +294,8 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
 	     p54_compare_channels, NULL);
 
 	for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
-		if (list->band_channel_num[i]) {
-			ret = p54_generate_band(dev, list, i);
-			if (ret)
-				goto free;
-
+		if (p54_generate_band(dev, list, i) == 0)
 			j++;
-		}
 	}
 	if (j == 0) {
 		/* no useable band available. */
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 4d486bf9f725..18012dbfb45d 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -579,7 +579,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
 	 * For now, disable PS by default because it affects
 	 * link stability significantly.
 	 */
-	dev->wiphy->ps_default = false;
+	dev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	mutex_init(&priv->conf_mutex);
 	mutex_init(&priv->eeprom_mutex);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index d348c265e867..a15962a19b2a 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -411,7 +411,7 @@ static int p54p_open(struct ieee80211_hw *dev)
 	int err;
 
 	init_completion(&priv->boot_comp);
-	err = request_irq(priv->pdev->irq, &p54p_interrupt,
+	err = request_irq(priv->pdev->irq, p54p_interrupt,
 			  IRQF_SHARED, "p54pci", dev);
 	if (err) {
 		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2505be56ae39..a3ba3539db02 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -41,6 +41,9 @@
 #define ISL3877_IMAGE_FILE	"isl3877"
 #define ISL3886_IMAGE_FILE	"isl3886"
 #define ISL3890_IMAGE_FILE	"isl3890"
+MODULE_FIRMWARE(ISL3877_IMAGE_FILE);
+MODULE_FIRMWARE(ISL3886_IMAGE_FILE);
+MODULE_FIRMWARE(ISL3890_IMAGE_FILE);
 
 static int prism54_bring_down(islpci_private *);
 static int islpci_alloc_memory(islpci_private *);
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 83d366258c81..e4f2bb7368f2 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -181,7 +181,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	isl38xx_disable_interrupts(priv->device_base);
 
 	/* request for the interrupt before uploading the firmware */
-	rvalue = request_irq(pdev->irq, &islpci_interrupt,
+	rvalue = request_irq(pdev->irq, islpci_interrupt,
 			     IRQF_SHARED, ndev->name, priv);
 
 	if (rvalue) {
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 5b8e3e4cdd9f..88e1e4e32b22 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2854,18 +2854,8 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
 
 /*===========================================================================*/
 #ifdef CONFIG_PROC_FS
-static void raycs_write(const char *name, write_proc_t *w, void *data)
-{
-	struct proc_dir_entry *entry =
-	    create_proc_entry(name, S_IFREG | S_IWUSR, NULL);
-	if (entry) {
-		entry->write_proc = w;
-		entry->data = data;
-	}
-}
-
-static int write_essid(struct file *file, const char __user *buffer,
-		       unsigned long count, void *data)
+static ssize_t ray_cs_essid_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
 {
 	static char proc_essid[33];
 	unsigned int len = count;
@@ -2879,8 +2869,13 @@ static int write_essid(struct file *file, const char __user *buffer,
 	return count;
 }
 
-static int write_int(struct file *file, const char __user *buffer,
-		     unsigned long count, void *data)
+static const struct file_operations ray_cs_essid_proc_fops = {
+	.owner		= THIS_MODULE,
+	.write		= ray_cs_essid_proc_write,
+};
+
+static ssize_t int_proc_write(struct file *file, const char __user *buffer,
+			      size_t count, loff_t *pos)
 {
 	static char proc_number[10];
 	char *p;
@@ -2903,9 +2898,14 @@ static int write_int(struct file *file, const char __user *buffer,
 		nr = nr * 10 + c;
 		p++;
 	} while (--len);
-	*(int *)data = nr;
+	*(int *)PDE(file->f_path.dentry->d_inode)->data = nr;
 	return count;
 }
+
+static const struct file_operations int_proc_fops = {
+	.owner		= THIS_MODULE,
+	.write		= int_proc_write,
+};
 #endif
 
 static struct pcmcia_device_id ray_ids[] = {
@@ -2940,9 +2940,9 @@ static int __init init_ray_cs(void)
 	proc_mkdir("driver/ray_cs", NULL);
 
 	proc_create("driver/ray_cs/ray_cs", 0, NULL, &ray_cs_proc_fops);
-	raycs_write("driver/ray_cs/essid", write_essid, NULL);
-	raycs_write("driver/ray_cs/net_type", write_int, &net_type);
-	raycs_write("driver/ray_cs/translate", write_int, &translate);
+	proc_create("driver/ray_cs/essid", S_IWUSR, NULL, &ray_cs_essid_proc_fops);
+	proc_create_data("driver/ray_cs/net_type", S_IWUSR, NULL, &int_proc_fops, &net_type);
+	proc_create_data("driver/ray_cs/translate", S_IWUSR, NULL, &int_proc_fops, &translate);
 #endif
 	if (translate != 0)
 		translate = 1;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 54175b6fa86c..2ecbedb26e15 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -83,11 +83,11 @@ MODULE_PARM_DESC(roamdelta,
 	"set roaming tendency: 0=aggressive, 1=moderate, "
 				"2=conservative (default: moderate)");
 
-static int modparam_workaround_interval = 500;
+static int modparam_workaround_interval;
 module_param_named(workaround_interval, modparam_workaround_interval,
 							int, 0444);
 MODULE_PARM_DESC(workaround_interval,
-	"set stall workaround interval in msecs (default: 500)");
+	"set stall workaround interval in msecs (0=disabled) (default: 0)");
 
 
 /* various RNDIS OID defs */
@@ -733,12 +733,13 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
 			le32_to_cpu(u.get_c->status));
 
 	if (ret == 0) {
+		memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
+
 		ret = le32_to_cpu(u.get_c->len);
 		if (ret > *len)
 			*len = ret;
-		memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
-		ret = rndis_error_status(u.get_c->status);
 
+		ret = rndis_error_status(u.get_c->status);
 		if (ret < 0)
 			devdbg(dev, "rndis_query_oid(%s): device returned "
 				"error,  0x%08x (%d)", oid_to_string(oid),
@@ -1072,6 +1073,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
 		auth_mode = NDIS_80211_AUTH_SHARED;
 	else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
 		auth_mode = NDIS_80211_AUTH_OPEN;
+	else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC)
+		auth_mode = NDIS_80211_AUTH_AUTO_SWITCH;
 	else
 		return -ENOTSUPP;
 
@@ -2547,7 +2550,7 @@ static void rndis_device_poller(struct work_struct *work)
 	/* Workaround transfer stalls on poor quality links.
 	 * TODO: find right way to fix these stalls (as stalls do not happen
 	 * with ndiswrapper/windows driver). */
-	if (priv->last_qual <= 25) {
+	if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) {
 		/* Decrease stats worker interval to catch stalls
 		 * faster. Faster than 400-500ms causes packet loss,
 		 * slower doesn't catch stalls fast enough.
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ed1f997e3521..bf60689aaabb 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
 menuconfig RT2X00
 	tristate "Ralink driver support"
-	depends on MAC80211 && WLAN_80211
+	depends on MAC80211
 	---help---
 	  This will enable the support for the Ralink drivers,
 	  developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
@@ -53,6 +53,36 @@ config RT61PCI
 
 	  When compiled as a module, this driver will be called rt61pci.
 
+config RT2800PCI_PCI
+	tristate
+	depends on PCI
+	default y
+
+config RT2800PCI_SOC
+	tristate
+	depends on RALINK_RT288X || RALINK_RT305X
+	default y
+
+config RT2800PCI
+	tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)"
+	depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
+	select RT2800_LIB
+	select RT2X00_LIB_PCI if RT2800PCI_PCI
+	select RT2X00_LIB_SOC if RT2800PCI_SOC
+	select RT2X00_LIB_HT
+	select RT2X00_LIB_FIRMWARE
+	select RT2X00_LIB_CRYPTO
+	select CRC_CCITT
+	select EEPROM_93CX6
+	---help---
+	  This adds support for rt2800 wireless chipset family.
+	  Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
+
+	  This driver is non-functional at the moment and is intended for
+	  developers.
+
+	  When compiled as a module, this driver will be called "rt2800pci.ko".
+
 config RT2500USB
 	tristate "Ralink rt2500 (USB) support"
 	depends on USB
@@ -78,8 +108,9 @@ config RT73USB
 	  When compiled as a module, this driver will be called rt73usb.
 
 config RT2800USB
-	tristate "Ralink rt2800 (USB) support"
+	tristate "Ralink rt2800 (USB) support (EXPERIMENTAL)"
 	depends on USB && EXPERIMENTAL
+	select RT2800_LIB
 	select RT2X00_LIB_USB
 	select RT2X00_LIB_HT
 	select RT2X00_LIB_FIRMWARE
@@ -89,12 +120,23 @@ config RT2800USB
 	  This adds experimental support for rt2800 wireless chipset family.
 	  Supported chips: RT2770, RT2870 & RT3070.
 
+	  Known issues:
+	  - support for RT2870 chips doesn't work with 802.11n APs yet
+	  - support for RT3070 chips is non-functional at the moment
+
 	  When compiled as a module, this driver will be called "rt2800usb.ko".
 
+config RT2800_LIB
+	tristate
+
 config RT2X00_LIB_PCI
 	tristate
 	select RT2X00_LIB
 
+config RT2X00_LIB_SOC
+	tristate
+	select RT2X00_LIB
+
 config RT2X00_LIB_USB
 	tristate
 	select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 13043ea97667..971339858297 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -11,10 +11,13 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_HT)	+= rt2x00ht.o
 
 obj-$(CONFIG_RT2X00_LIB)		+= rt2x00lib.o
 obj-$(CONFIG_RT2X00_LIB_PCI)		+= rt2x00pci.o
+obj-$(CONFIG_RT2X00_LIB_SOC)		+= rt2x00soc.o
 obj-$(CONFIG_RT2X00_LIB_USB)		+= rt2x00usb.o
+obj-$(CONFIG_RT2800_LIB)		+= rt2800lib.o
 obj-$(CONFIG_RT2400PCI)			+= rt2400pci.o
 obj-$(CONFIG_RT2500PCI)			+= rt2500pci.o
 obj-$(CONFIG_RT61PCI)			+= rt61pci.o
+obj-$(CONFIG_RT2800PCI)			+= rt2800pci.o
 obj-$(CONFIG_RT2500USB)			+= rt2500usb.o
 obj-$(CONFIG_RT73USB)			+= rt73usb.o
 obj-$(CONFIG_RT2800USB)			+= rt2800usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 798f625e38f7..e7f46405a418 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -1341,6 +1341,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
 	rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
 	rt2x00_set_chip_rf(rt2x00dev, value, reg);
+	rt2x00_print_chip(rt2x00dev);
 
 	if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
 	    !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
@@ -1431,7 +1432,6 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 			       IEEE80211_HW_SIGNAL_DBM |
 			       IEEE80211_HW_SUPPORTS_PS |
 			       IEEE80211_HW_PS_NULLFUNC_STACK;
-	rt2x00dev->hw->extra_tx_headroom = 0;
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1622,20 +1622,21 @@ static const struct data_queue_desc rt2400pci_queue_atim = {
 };
 
 static const struct rt2x00_ops rt2400pci_ops = {
-	.name		= KBUILD_MODNAME,
-	.max_sta_intf	= 1,
-	.max_ap_intf	= 1,
-	.eeprom_size	= EEPROM_SIZE,
-	.rf_size	= RF_SIZE,
-	.tx_queues	= NUM_TX_QUEUES,
-	.rx		= &rt2400pci_queue_rx,
-	.tx		= &rt2400pci_queue_tx,
-	.bcn		= &rt2400pci_queue_bcn,
-	.atim		= &rt2400pci_queue_atim,
-	.lib		= &rt2400pci_rt2x00_ops,
-	.hw		= &rt2400pci_mac80211_ops,
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 1,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= 0,
+	.rx			= &rt2400pci_queue_rx,
+	.tx			= &rt2400pci_queue_tx,
+	.bcn			= &rt2400pci_queue_bcn,
+	.atim			= &rt2400pci_queue_atim,
+	.lib			= &rt2400pci_rt2x00_ops,
+	.hw			= &rt2400pci_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
-	.debugfs	= &rt2400pci_rt2x00debug,
+	.debugfs		= &rt2400pci_rt2x00debug,
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
 
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index ccd644104ad1..6c21ef66dfe0 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2e872ac69826..408fcfc120f5 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -1505,6 +1505,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
 	rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
 	rt2x00_set_chip_rf(rt2x00dev, value, reg);
+	rt2x00_print_chip(rt2x00dev);
 
 	if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
 	    !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
@@ -1732,8 +1733,6 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 			       IEEE80211_HW_SUPPORTS_PS |
 			       IEEE80211_HW_PS_NULLFUNC_STACK;
 
-	rt2x00dev->hw->extra_tx_headroom = 0;
-
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
 				rt2x00_eeprom_addr(rt2x00dev,
@@ -1921,20 +1920,21 @@ static const struct data_queue_desc rt2500pci_queue_atim = {
 };
 
 static const struct rt2x00_ops rt2500pci_ops = {
-	.name		= KBUILD_MODNAME,
-	.max_sta_intf	= 1,
-	.max_ap_intf	= 1,
-	.eeprom_size	= EEPROM_SIZE,
-	.rf_size	= RF_SIZE,
-	.tx_queues	= NUM_TX_QUEUES,
-	.rx		= &rt2500pci_queue_rx,
-	.tx		= &rt2500pci_queue_tx,
-	.bcn		= &rt2500pci_queue_bcn,
-	.atim		= &rt2500pci_queue_atim,
-	.lib		= &rt2500pci_rt2x00_ops,
-	.hw		= &rt2500pci_mac80211_ops,
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 1,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= 0,
+	.rx			= &rt2500pci_queue_rx,
+	.tx			= &rt2500pci_queue_tx,
+	.bcn			= &rt2500pci_queue_bcn,
+	.atim			= &rt2500pci_queue_atim,
+	.lib			= &rt2500pci_rt2x00_ops,
+	.hw			= &rt2500pci_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
-	.debugfs	= &rt2500pci_rt2x00debug,
+	.debugfs		= &rt2500pci_rt2x00debug,
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
 
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 54d37957883c..b0075674c09b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 22dd6d9e2981..83f2592c59de 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -716,139 +716,6 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
 }
 
 /*
- * NOTE: This function is directly ported from legacy driver, but
- * despite it being declared it was never called. Although link tuning
- * sounds like a good idea, and usually works well for the other drivers,
- * it does _not_ work with rt2500usb. Enabling this function will result
- * in TX capabilities only until association kicks in. Immediately
- * after the successful association all TX frames will be kept in the
- * hardware queue and never transmitted.
- */
-#if 0
-static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
-{
-	int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
-	u16 bbp_thresh;
-	u16 vgc_bound;
-	u16 sens;
-	u16 r24;
-	u16 r25;
-	u16 r61;
-	u16 r17_sens;
-	u8 r17;
-	u8 up_bound;
-	u8 low_bound;
-
-	/*
-	 * Read current r17 value, as well as the sensitivity values
-	 * for the r17 register.
-	 */
-	rt2500usb_bbp_read(rt2x00dev, 17, &r17);
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &r17_sens);
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &vgc_bound);
-	up_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCUPPER);
-	low_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCLOWER);
-
-	/*
-	 * If we are not associated, we should go straight to the
-	 * dynamic CCA tuning.
-	 */
-	if (!rt2x00dev->intf_associated)
-		goto dynamic_cca_tune;
-
-	/*
-	 * Determine the BBP tuning threshold and correctly
-	 * set BBP 24, 25 and 61.
-	 */
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &bbp_thresh);
-	bbp_thresh = rt2x00_get_field16(bbp_thresh, EEPROM_BBPTUNE_THRESHOLD);
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &r24);
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &r25);
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &r61);
-
-	if ((rssi + bbp_thresh) > 0) {
-		r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_HIGH);
-		r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_HIGH);
-		r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_HIGH);
-	} else {
-		r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_LOW);
-		r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_LOW);
-		r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_LOW);
-	}
-
-	rt2500usb_bbp_write(rt2x00dev, 24, r24);
-	rt2500usb_bbp_write(rt2x00dev, 25, r25);
-	rt2500usb_bbp_write(rt2x00dev, 61, r61);
-
-	/*
-	 * A too low RSSI will cause too much false CCA which will
-	 * then corrupt the R17 tuning. To remidy this the tuning should
-	 * be stopped (While making sure the R17 value will not exceed limits)
-	 */
-	if (rssi >= -40) {
-		if (r17 != 0x60)
-			rt2500usb_bbp_write(rt2x00dev, 17, 0x60);
-		return;
-	}
-
-	/*
-	 * Special big-R17 for short distance
-	 */
-	if (rssi >= -58) {
-		sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_LOW);
-		if (r17 != sens)
-			rt2500usb_bbp_write(rt2x00dev, 17, sens);
-		return;
-	}
-
-	/*
-	 * Special mid-R17 for middle distance
-	 */
-	if (rssi >= -74) {
-		sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_HIGH);
-		if (r17 != sens)
-			rt2500usb_bbp_write(rt2x00dev, 17, sens);
-		return;
-	}
-
-	/*
-	 * Leave short or middle distance condition, restore r17
-	 * to the dynamic tuning range.
-	 */
-	low_bound = 0x32;
-	if (rssi < -77)
-		up_bound -= (-77 - rssi);
-
-	if (up_bound < low_bound)
-		up_bound = low_bound;
-
-	if (r17 > up_bound) {
-		rt2500usb_bbp_write(rt2x00dev, 17, up_bound);
-		rt2x00dev->link.vgc_level = up_bound;
-		return;
-	}
-
-dynamic_cca_tune:
-
-	/*
-	 * R17 is inside the dynamic tuning range,
-	 * start tuning the link based on the false cca counter.
-	 */
-	if (rt2x00dev->link.qual.false_cca > 512 && r17 < up_bound) {
-		rt2500usb_bbp_write(rt2x00dev, 17, ++r17);
-		rt2x00dev->link.vgc_level = r17;
-	} else if (rt2x00dev->link.qual.false_cca < 100 && r17 > low_bound) {
-		rt2500usb_bbp_write(rt2x00dev, 17, --r17);
-		rt2x00dev->link.vgc_level = r17;
-	}
-}
-#else
-#define rt2500usb_link_tuner	NULL
-#endif
-
-/*
  * Initialization functions.
  */
 static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -1542,6 +1409,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
 	rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
 	rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
+	rt2x00_print_chip(rt2x00dev);
 
 	if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
 	    rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
@@ -1788,8 +1656,6 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 	    IEEE80211_HW_SUPPORTS_PS |
 	    IEEE80211_HW_PS_NULLFUNC_STACK;
 
-	rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
-
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
 				rt2x00_eeprom_addr(rt2x00dev,
@@ -1910,7 +1776,6 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
 	.rfkill_poll		= rt2500usb_rfkill_poll,
 	.link_stats		= rt2500usb_link_stats,
 	.reset_tuner		= rt2500usb_reset_tuner,
-	.link_tuner		= rt2500usb_link_tuner,
 	.write_tx_desc		= rt2500usb_write_tx_desc,
 	.write_tx_data		= rt2x00usb_write_tx_data,
 	.write_beacon		= rt2500usb_write_beacon,
@@ -1956,20 +1821,21 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
 };
 
 static const struct rt2x00_ops rt2500usb_ops = {
-	.name		= KBUILD_MODNAME,
-	.max_sta_intf	= 1,
-	.max_ap_intf	= 1,
-	.eeprom_size	= EEPROM_SIZE,
-	.rf_size	= RF_SIZE,
-	.tx_queues	= NUM_TX_QUEUES,
-	.rx		= &rt2500usb_queue_rx,
-	.tx		= &rt2500usb_queue_tx,
-	.bcn		= &rt2500usb_queue_bcn,
-	.atim		= &rt2500usb_queue_atim,
-	.lib		= &rt2500usb_rt2x00_ops,
-	.hw		= &rt2500usb_mac80211_ops,
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 1,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= TXD_DESC_SIZE,
+	.rx			= &rt2500usb_queue_rx,
+	.tx			= &rt2500usb_queue_tx,
+	.bcn			= &rt2500usb_queue_bcn,
+	.atim			= &rt2500usb_queue_atim,
+	.lib			= &rt2500usb_rt2x00_ops,
+	.hw			= &rt2500usb_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
-	.debugfs	= &rt2500usb_rt2x00debug,
+	.debugfs		= &rt2500usb_rt2x00debug,
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
 
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index b01edca42583..341a70454635 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
new file mode 100644
index 000000000000..c5fe867665e6
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -0,0 +1,1852 @@
+/*
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2800
+	Abstract: Data structures and registers for the rt2800 modules.
+	Supported chipsets: RT2800E, RT2800ED & RT2800U.
+ */
+
+#ifndef RT2800_H
+#define RT2800_H
+
+/*
+ * RF chip defines.
+ *
+ * RF2820 2.4G 2T3R
+ * RF2850 2.4G/5G 2T3R
+ * RF2720 2.4G 1T2R
+ * RF2750 2.4G/5G 1T2R
+ * RF3020 2.4G 1T1R
+ * RF2020 2.4G B/G
+ * RF3021 2.4G 1T2R
+ * RF3022 2.4G 2T2R
+ * RF3052 2.4G 2T2R
+ */
+#define RF2820				0x0001
+#define RF2850				0x0002
+#define RF2720				0x0003
+#define RF2750				0x0004
+#define RF3020				0x0005
+#define RF2020				0x0006
+#define RF3021				0x0007
+#define RF3022				0x0008
+#define RF3052				0x0009
+
+/*
+ * Chipset version.
+ */
+#define RT2860C_VERSION			0x28600100
+#define RT2860D_VERSION			0x28600101
+#define RT2880E_VERSION			0x28720200
+#define RT2883_VERSION			0x28830300
+#define RT3070_VERSION			0x30700200
+
+/*
+ * Signal information.
+ * Default offset is required for RSSI <-> dBm conversion.
+ */
+#define DEFAULT_RSSI_OFFSET		120 /* FIXME */
+
+/*
+ * Register layout information.
+ */
+#define CSR_REG_BASE			0x1000
+#define CSR_REG_SIZE			0x0800
+#define EEPROM_BASE			0x0000
+#define EEPROM_SIZE			0x0110
+#define BBP_BASE			0x0000
+#define BBP_SIZE			0x0080
+#define RF_BASE				0x0004
+#define RF_SIZE				0x0010
+
+/*
+ * Number of TX queues.
+ */
+#define NUM_TX_QUEUES			4
+
+/*
+ * USB registers.
+ */
+
+/*
+ * INT_SOURCE_CSR: Interrupt source register.
+ * Write one to clear corresponding bit.
+ * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
+ */
+#define INT_SOURCE_CSR			0x0200
+#define INT_SOURCE_CSR_RXDELAYINT	FIELD32(0x00000001)
+#define INT_SOURCE_CSR_TXDELAYINT	FIELD32(0x00000002)
+#define INT_SOURCE_CSR_RX_DONE		FIELD32(0x00000004)
+#define INT_SOURCE_CSR_AC0_DMA_DONE	FIELD32(0x00000008)
+#define INT_SOURCE_CSR_AC1_DMA_DONE	FIELD32(0x00000010)
+#define INT_SOURCE_CSR_AC2_DMA_DONE	FIELD32(0x00000020)
+#define INT_SOURCE_CSR_AC3_DMA_DONE	FIELD32(0x00000040)
+#define INT_SOURCE_CSR_HCCA_DMA_DONE	FIELD32(0x00000080)
+#define INT_SOURCE_CSR_MGMT_DMA_DONE	FIELD32(0x00000100)
+#define INT_SOURCE_CSR_MCU_COMMAND	FIELD32(0x00000200)
+#define INT_SOURCE_CSR_RXTX_COHERENT	FIELD32(0x00000400)
+#define INT_SOURCE_CSR_TBTT		FIELD32(0x00000800)
+#define INT_SOURCE_CSR_PRE_TBTT		FIELD32(0x00001000)
+#define INT_SOURCE_CSR_TX_FIFO_STATUS	FIELD32(0x00002000)
+#define INT_SOURCE_CSR_AUTO_WAKEUP	FIELD32(0x00004000)
+#define INT_SOURCE_CSR_GPTIMER		FIELD32(0x00008000)
+#define INT_SOURCE_CSR_RX_COHERENT	FIELD32(0x00010000)
+#define INT_SOURCE_CSR_TX_COHERENT	FIELD32(0x00020000)
+
+/*
+ * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF.
+ */
+#define INT_MASK_CSR			0x0204
+#define INT_MASK_CSR_RXDELAYINT		FIELD32(0x00000001)
+#define INT_MASK_CSR_TXDELAYINT		FIELD32(0x00000002)
+#define INT_MASK_CSR_RX_DONE		FIELD32(0x00000004)
+#define INT_MASK_CSR_AC0_DMA_DONE	FIELD32(0x00000008)
+#define INT_MASK_CSR_AC1_DMA_DONE	FIELD32(0x00000010)
+#define INT_MASK_CSR_AC2_DMA_DONE	FIELD32(0x00000020)
+#define INT_MASK_CSR_AC3_DMA_DONE	FIELD32(0x00000040)
+#define INT_MASK_CSR_HCCA_DMA_DONE	FIELD32(0x00000080)
+#define INT_MASK_CSR_MGMT_DMA_DONE	FIELD32(0x00000100)
+#define INT_MASK_CSR_MCU_COMMAND	FIELD32(0x00000200)
+#define INT_MASK_CSR_RXTX_COHERENT	FIELD32(0x00000400)
+#define INT_MASK_CSR_TBTT		FIELD32(0x00000800)
+#define INT_MASK_CSR_PRE_TBTT		FIELD32(0x00001000)
+#define INT_MASK_CSR_TX_FIFO_STATUS	FIELD32(0x00002000)
+#define INT_MASK_CSR_AUTO_WAKEUP	FIELD32(0x00004000)
+#define INT_MASK_CSR_GPTIMER		FIELD32(0x00008000)
+#define INT_MASK_CSR_RX_COHERENT	FIELD32(0x00010000)
+#define INT_MASK_CSR_TX_COHERENT	FIELD32(0x00020000)
+
+/*
+ * WPDMA_GLO_CFG
+ */
+#define WPDMA_GLO_CFG 			0x0208
+#define WPDMA_GLO_CFG_ENABLE_TX_DMA	FIELD32(0x00000001)
+#define WPDMA_GLO_CFG_TX_DMA_BUSY    	FIELD32(0x00000002)
+#define WPDMA_GLO_CFG_ENABLE_RX_DMA	FIELD32(0x00000004)
+#define WPDMA_GLO_CFG_RX_DMA_BUSY	FIELD32(0x00000008)
+#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE	FIELD32(0x00000030)
+#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE	FIELD32(0x00000040)
+#define WPDMA_GLO_CFG_BIG_ENDIAN	FIELD32(0x00000080)
+#define WPDMA_GLO_CFG_RX_HDR_SCATTER	FIELD32(0x0000ff00)
+#define WPDMA_GLO_CFG_HDR_SEG_LEN	FIELD32(0xffff0000)
+
+/*
+ * WPDMA_RST_IDX
+ */
+#define WPDMA_RST_IDX 			0x020c
+#define WPDMA_RST_IDX_DTX_IDX0		FIELD32(0x00000001)
+#define WPDMA_RST_IDX_DTX_IDX1		FIELD32(0x00000002)
+#define WPDMA_RST_IDX_DTX_IDX2		FIELD32(0x00000004)
+#define WPDMA_RST_IDX_DTX_IDX3		FIELD32(0x00000008)
+#define WPDMA_RST_IDX_DTX_IDX4		FIELD32(0x00000010)
+#define WPDMA_RST_IDX_DTX_IDX5		FIELD32(0x00000020)
+#define WPDMA_RST_IDX_DRX_IDX0		FIELD32(0x00010000)
+
+/*
+ * DELAY_INT_CFG
+ */
+#define DELAY_INT_CFG			0x0210
+#define DELAY_INT_CFG_RXMAX_PTIME	FIELD32(0x000000ff)
+#define DELAY_INT_CFG_RXMAX_PINT	FIELD32(0x00007f00)
+#define DELAY_INT_CFG_RXDLY_INT_EN	FIELD32(0x00008000)
+#define DELAY_INT_CFG_TXMAX_PTIME	FIELD32(0x00ff0000)
+#define DELAY_INT_CFG_TXMAX_PINT	FIELD32(0x7f000000)
+#define DELAY_INT_CFG_TXDLY_INT_EN	FIELD32(0x80000000)
+
+/*
+ * WMM_AIFSN_CFG: Aifsn for each EDCA AC
+ * AIFSN0: AC_BE
+ * AIFSN1: AC_BK
+ * AIFSN2: AC_VI
+ * AIFSN3: AC_VO
+ */
+#define WMM_AIFSN_CFG			0x0214
+#define WMM_AIFSN_CFG_AIFSN0		FIELD32(0x0000000f)
+#define WMM_AIFSN_CFG_AIFSN1		FIELD32(0x000000f0)
+#define WMM_AIFSN_CFG_AIFSN2		FIELD32(0x00000f00)
+#define WMM_AIFSN_CFG_AIFSN3		FIELD32(0x0000f000)
+
+/*
+ * WMM_CWMIN_CSR: CWmin for each EDCA AC
+ * CWMIN0: AC_BE
+ * CWMIN1: AC_BK
+ * CWMIN2: AC_VI
+ * CWMIN3: AC_VO
+ */
+#define WMM_CWMIN_CFG			0x0218
+#define WMM_CWMIN_CFG_CWMIN0		FIELD32(0x0000000f)
+#define WMM_CWMIN_CFG_CWMIN1		FIELD32(0x000000f0)
+#define WMM_CWMIN_CFG_CWMIN2		FIELD32(0x00000f00)
+#define WMM_CWMIN_CFG_CWMIN3		FIELD32(0x0000f000)
+
+/*
+ * WMM_CWMAX_CSR: CWmax for each EDCA AC
+ * CWMAX0: AC_BE
+ * CWMAX1: AC_BK
+ * CWMAX2: AC_VI
+ * CWMAX3: AC_VO
+ */
+#define WMM_CWMAX_CFG			0x021c
+#define WMM_CWMAX_CFG_CWMAX0		FIELD32(0x0000000f)
+#define WMM_CWMAX_CFG_CWMAX1		FIELD32(0x000000f0)
+#define WMM_CWMAX_CFG_CWMAX2		FIELD32(0x00000f00)
+#define WMM_CWMAX_CFG_CWMAX3		FIELD32(0x0000f000)
+
+/*
+ * AC_TXOP0: AC_BK/AC_BE TXOP register
+ * AC0TXOP: AC_BK in unit of 32us
+ * AC1TXOP: AC_BE in unit of 32us
+ */
+#define WMM_TXOP0_CFG			0x0220
+#define WMM_TXOP0_CFG_AC0TXOP		FIELD32(0x0000ffff)
+#define WMM_TXOP0_CFG_AC1TXOP		FIELD32(0xffff0000)
+
+/*
+ * AC_TXOP1: AC_VO/AC_VI TXOP register
+ * AC2TXOP: AC_VI in unit of 32us
+ * AC3TXOP: AC_VO in unit of 32us
+ */
+#define WMM_TXOP1_CFG			0x0224
+#define WMM_TXOP1_CFG_AC2TXOP		FIELD32(0x0000ffff)
+#define WMM_TXOP1_CFG_AC3TXOP		FIELD32(0xffff0000)
+
+/*
+ * GPIO_CTRL_CFG:
+ */
+#define GPIO_CTRL_CFG			0x0228
+#define GPIO_CTRL_CFG_BIT0		FIELD32(0x00000001)
+#define GPIO_CTRL_CFG_BIT1		FIELD32(0x00000002)
+#define GPIO_CTRL_CFG_BIT2		FIELD32(0x00000004)
+#define GPIO_CTRL_CFG_BIT3		FIELD32(0x00000008)
+#define GPIO_CTRL_CFG_BIT4		FIELD32(0x00000010)
+#define GPIO_CTRL_CFG_BIT5		FIELD32(0x00000020)
+#define GPIO_CTRL_CFG_BIT6		FIELD32(0x00000040)
+#define GPIO_CTRL_CFG_BIT7		FIELD32(0x00000080)
+#define GPIO_CTRL_CFG_BIT8		FIELD32(0x00000100)
+
+/*
+ * MCU_CMD_CFG
+ */
+#define MCU_CMD_CFG			0x022c
+
+/*
+ * AC_BK register offsets
+ */
+#define TX_BASE_PTR0			0x0230
+#define TX_MAX_CNT0			0x0234
+#define TX_CTX_IDX0			0x0238
+#define TX_DTX_IDX0			0x023c
+
+/*
+ * AC_BE register offsets
+ */
+#define TX_BASE_PTR1			0x0240
+#define TX_MAX_CNT1			0x0244
+#define TX_CTX_IDX1			0x0248
+#define TX_DTX_IDX1			0x024c
+
+/*
+ * AC_VI register offsets
+ */
+#define TX_BASE_PTR2			0x0250
+#define TX_MAX_CNT2			0x0254
+#define TX_CTX_IDX2			0x0258
+#define TX_DTX_IDX2			0x025c
+
+/*
+ * AC_VO register offsets
+ */
+#define TX_BASE_PTR3			0x0260
+#define TX_MAX_CNT3			0x0264
+#define TX_CTX_IDX3			0x0268
+#define TX_DTX_IDX3			0x026c
+
+/*
+ * HCCA register offsets
+ */
+#define TX_BASE_PTR4			0x0270
+#define TX_MAX_CNT4			0x0274
+#define TX_CTX_IDX4			0x0278
+#define TX_DTX_IDX4			0x027c
+
+/*
+ * MGMT register offsets
+ */
+#define TX_BASE_PTR5			0x0280
+#define TX_MAX_CNT5			0x0284
+#define TX_CTX_IDX5			0x0288
+#define TX_DTX_IDX5			0x028c
+
+/*
+ * RX register offsets
+ */
+#define RX_BASE_PTR			0x0290
+#define RX_MAX_CNT			0x0294
+#define RX_CRX_IDX			0x0298
+#define RX_DRX_IDX			0x029c
+
+/*
+ * PBF_SYS_CTRL
+ * HOST_RAM_WRITE: enable Host program ram write selection
+ */
+#define PBF_SYS_CTRL			0x0400
+#define PBF_SYS_CTRL_READY		FIELD32(0x00000080)
+#define PBF_SYS_CTRL_HOST_RAM_WRITE	FIELD32(0x00010000)
+
+/*
+ * HOST-MCU shared memory
+ */
+#define HOST_CMD_CSR			0x0404
+#define HOST_CMD_CSR_HOST_COMMAND	FIELD32(0x000000ff)
+
+/*
+ * PBF registers
+ * Most are for debug. Driver doesn't touch PBF register.
+ */
+#define PBF_CFG				0x0408
+#define PBF_MAX_PCNT			0x040c
+#define PBF_CTRL			0x0410
+#define PBF_INT_STA			0x0414
+#define PBF_INT_ENA			0x0418
+
+/*
+ * BCN_OFFSET0:
+ */
+#define BCN_OFFSET0			0x042c
+#define BCN_OFFSET0_BCN0		FIELD32(0x000000ff)
+#define BCN_OFFSET0_BCN1		FIELD32(0x0000ff00)
+#define BCN_OFFSET0_BCN2		FIELD32(0x00ff0000)
+#define BCN_OFFSET0_BCN3		FIELD32(0xff000000)
+
+/*
+ * BCN_OFFSET1:
+ */
+#define BCN_OFFSET1			0x0430
+#define BCN_OFFSET1_BCN4		FIELD32(0x000000ff)
+#define BCN_OFFSET1_BCN5		FIELD32(0x0000ff00)
+#define BCN_OFFSET1_BCN6		FIELD32(0x00ff0000)
+#define BCN_OFFSET1_BCN7		FIELD32(0xff000000)
+
+/*
+ * PBF registers
+ * Most are for debug. Driver doesn't touch PBF register.
+ */
+#define TXRXQ_PCNT			0x0438
+#define PBF_DBG				0x043c
+
+/*
+ * RF registers
+ */
+#define	RF_CSR_CFG			0x0500
+#define RF_CSR_CFG_DATA			FIELD32(0x000000ff)
+#define RF_CSR_CFG_REGNUM		FIELD32(0x00001f00)
+#define RF_CSR_CFG_WRITE		FIELD32(0x00010000)
+#define RF_CSR_CFG_BUSY			FIELD32(0x00020000)
+
+/*
+ * EFUSE_CSR: RT30x0 EEPROM
+ */
+#define EFUSE_CTRL			0x0580
+#define EFUSE_CTRL_ADDRESS_IN		FIELD32(0x03fe0000)
+#define EFUSE_CTRL_MODE			FIELD32(0x000000c0)
+#define EFUSE_CTRL_KICK			FIELD32(0x40000000)
+#define EFUSE_CTRL_PRESENT		FIELD32(0x80000000)
+
+/*
+ * EFUSE_DATA0
+ */
+#define EFUSE_DATA0			0x0590
+
+/*
+ * EFUSE_DATA1
+ */
+#define EFUSE_DATA1			0x0594
+
+/*
+ * EFUSE_DATA2
+ */
+#define EFUSE_DATA2			0x0598
+
+/*
+ * EFUSE_DATA3
+ */
+#define EFUSE_DATA3			0x059c
+
+/*
+ * MAC Control/Status Registers (CSR).
+ * Some values are set in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * MAC_CSR0: ASIC revision number.
+ * ASIC_REV: 0
+ * ASIC_VER: 2860 or 2870
+ */
+#define MAC_CSR0			0x1000
+#define MAC_CSR0_ASIC_REV		FIELD32(0x0000ffff)
+#define MAC_CSR0_ASIC_VER		FIELD32(0xffff0000)
+
+/*
+ * MAC_SYS_CTRL:
+ */
+#define MAC_SYS_CTRL			0x1004
+#define MAC_SYS_CTRL_RESET_CSR		FIELD32(0x00000001)
+#define MAC_SYS_CTRL_RESET_BBP		FIELD32(0x00000002)
+#define MAC_SYS_CTRL_ENABLE_TX		FIELD32(0x00000004)
+#define MAC_SYS_CTRL_ENABLE_RX		FIELD32(0x00000008)
+#define MAC_SYS_CTRL_CONTINUOUS_TX	FIELD32(0x00000010)
+#define MAC_SYS_CTRL_LOOPBACK		FIELD32(0x00000020)
+#define MAC_SYS_CTRL_WLAN_HALT		FIELD32(0x00000040)
+#define MAC_SYS_CTRL_RX_TIMESTAMP	FIELD32(0x00000080)
+
+/*
+ * MAC_ADDR_DW0: STA MAC register 0
+ */
+#define MAC_ADDR_DW0			0x1008
+#define MAC_ADDR_DW0_BYTE0		FIELD32(0x000000ff)
+#define MAC_ADDR_DW0_BYTE1		FIELD32(0x0000ff00)
+#define MAC_ADDR_DW0_BYTE2		FIELD32(0x00ff0000)
+#define MAC_ADDR_DW0_BYTE3		FIELD32(0xff000000)
+
+/*
+ * MAC_ADDR_DW1: STA MAC register 1
+ * UNICAST_TO_ME_MASK:
+ * Used to mask off bits from byte 5 of the MAC address
+ * to determine the UNICAST_TO_ME bit for RX frames.
+ * The full mask is complemented by BSS_ID_MASK:
+ *    MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
+ */
+#define MAC_ADDR_DW1			0x100c
+#define MAC_ADDR_DW1_BYTE4		FIELD32(0x000000ff)
+#define MAC_ADDR_DW1_BYTE5		FIELD32(0x0000ff00)
+#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK	FIELD32(0x00ff0000)
+
+/*
+ * MAC_BSSID_DW0: BSSID register 0
+ */
+#define MAC_BSSID_DW0			0x1010
+#define MAC_BSSID_DW0_BYTE0		FIELD32(0x000000ff)
+#define MAC_BSSID_DW0_BYTE1		FIELD32(0x0000ff00)
+#define MAC_BSSID_DW0_BYTE2		FIELD32(0x00ff0000)
+#define MAC_BSSID_DW0_BYTE3		FIELD32(0xff000000)
+
+/*
+ * MAC_BSSID_DW1: BSSID register 1
+ * BSS_ID_MASK:
+ *     0: 1-BSSID mode (BSS index = 0)
+ *     1: 2-BSSID mode (BSS index: Byte5, bit 0)
+ *     2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
+ *     3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
+ * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
+ * BSSID. This will make sure that those bits will be ignored
+ * when determining the MY_BSS of RX frames.
+ */
+#define MAC_BSSID_DW1			0x1014
+#define MAC_BSSID_DW1_BYTE4		FIELD32(0x000000ff)
+#define MAC_BSSID_DW1_BYTE5		FIELD32(0x0000ff00)
+#define MAC_BSSID_DW1_BSS_ID_MASK	FIELD32(0x00030000)
+#define MAC_BSSID_DW1_BSS_BCN_NUM	FIELD32(0x001c0000)
+
+/*
+ * MAX_LEN_CFG: Maximum frame length register.
+ * MAX_MPDU: rt2860b max 16k bytes
+ * MAX_PSDU: Maximum PSDU length
+ *	(power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
+ */
+#define MAX_LEN_CFG			0x1018
+#define MAX_LEN_CFG_MAX_MPDU		FIELD32(0x00000fff)
+#define MAX_LEN_CFG_MAX_PSDU		FIELD32(0x00003000)
+#define MAX_LEN_CFG_MIN_PSDU		FIELD32(0x0000c000)
+#define MAX_LEN_CFG_MIN_MPDU		FIELD32(0x000f0000)
+
+/*
+ * BBP_CSR_CFG: BBP serial control register
+ * VALUE: Register value to program into BBP
+ * REG_NUM: Selected BBP register
+ * READ_CONTROL: 0 write BBP, 1 read BBP
+ * BUSY: ASIC is busy executing BBP commands
+ * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
+ * BBP_RW_MODE: 0 serial, 1 parallel
+ */
+#define BBP_CSR_CFG			0x101c
+#define BBP_CSR_CFG_VALUE		FIELD32(0x000000ff)
+#define BBP_CSR_CFG_REGNUM		FIELD32(0x0000ff00)
+#define BBP_CSR_CFG_READ_CONTROL	FIELD32(0x00010000)
+#define BBP_CSR_CFG_BUSY		FIELD32(0x00020000)
+#define BBP_CSR_CFG_BBP_PAR_DUR		FIELD32(0x00040000)
+#define BBP_CSR_CFG_BBP_RW_MODE		FIELD32(0x00080000)
+
+/*
+ * RF_CSR_CFG0: RF control register
+ * REGID_AND_VALUE: Register value to program into RF
+ * BITWIDTH: Selected RF register
+ * STANDBYMODE: 0 high when standby, 1 low when standby
+ * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
+ * BUSY: ASIC is busy executing RF commands
+ */
+#define RF_CSR_CFG0			0x1020
+#define RF_CSR_CFG0_REGID_AND_VALUE	FIELD32(0x00ffffff)
+#define RF_CSR_CFG0_BITWIDTH		FIELD32(0x1f000000)
+#define RF_CSR_CFG0_REG_VALUE_BW	FIELD32(0x1fffffff)
+#define RF_CSR_CFG0_STANDBYMODE		FIELD32(0x20000000)
+#define RF_CSR_CFG0_SEL			FIELD32(0x40000000)
+#define RF_CSR_CFG0_BUSY		FIELD32(0x80000000)
+
+/*
+ * RF_CSR_CFG1: RF control register
+ * REGID_AND_VALUE: Register value to program into RF
+ * RFGAP: Gap between BB_CONTROL_RF and RF_LE
+ *        0: 3 system clock cycle (37.5usec)
+ *        1: 5 system clock cycle (62.5usec)
+ */
+#define RF_CSR_CFG1			0x1024
+#define RF_CSR_CFG1_REGID_AND_VALUE	FIELD32(0x00ffffff)
+#define RF_CSR_CFG1_RFGAP		FIELD32(0x1f000000)
+
+/*
+ * RF_CSR_CFG2: RF control register
+ * VALUE: Register value to program into RF
+ */
+#define RF_CSR_CFG2			0x1028
+#define RF_CSR_CFG2_VALUE		FIELD32(0x00ffffff)
+
+/*
+ * LED_CFG: LED control
+ * color LEDs:
+ *   0: off
+ *   1: blinking upon TX2
+ *   2: periodic slow blinking
+ *   3: always on
+ * LED polarity:
+ *   0: active low
+ *   1: active high
+ */
+#define LED_CFG				0x102c
+#define LED_CFG_ON_PERIOD		FIELD32(0x000000ff)
+#define LED_CFG_OFF_PERIOD		FIELD32(0x0000ff00)
+#define LED_CFG_SLOW_BLINK_PERIOD	FIELD32(0x003f0000)
+#define LED_CFG_R_LED_MODE		FIELD32(0x03000000)
+#define LED_CFG_G_LED_MODE		FIELD32(0x0c000000)
+#define LED_CFG_Y_LED_MODE		FIELD32(0x30000000)
+#define LED_CFG_LED_POLAR		FIELD32(0x40000000)
+
+/*
+ * XIFS_TIME_CFG: MAC timing
+ * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
+ * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
+ * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
+ *	when MAC doesn't reference BBP signal BBRXEND
+ * EIFS: unit 1us
+ * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
+ *
+ */
+#define XIFS_TIME_CFG			0x1100
+#define XIFS_TIME_CFG_CCKM_SIFS_TIME	FIELD32(0x000000ff)
+#define XIFS_TIME_CFG_OFDM_SIFS_TIME	FIELD32(0x0000ff00)
+#define XIFS_TIME_CFG_OFDM_XIFS_TIME	FIELD32(0x000f0000)
+#define XIFS_TIME_CFG_EIFS		FIELD32(0x1ff00000)
+#define XIFS_TIME_CFG_BB_RXEND_ENABLE	FIELD32(0x20000000)
+
+/*
+ * BKOFF_SLOT_CFG:
+ */
+#define BKOFF_SLOT_CFG			0x1104
+#define BKOFF_SLOT_CFG_SLOT_TIME	FIELD32(0x000000ff)
+#define BKOFF_SLOT_CFG_CC_DELAY_TIME	FIELD32(0x0000ff00)
+
+/*
+ * NAV_TIME_CFG:
+ */
+#define NAV_TIME_CFG			0x1108
+#define NAV_TIME_CFG_SIFS		FIELD32(0x000000ff)
+#define NAV_TIME_CFG_SLOT_TIME		FIELD32(0x0000ff00)
+#define NAV_TIME_CFG_EIFS		FIELD32(0x01ff0000)
+#define NAV_TIME_ZERO_SIFS		FIELD32(0x02000000)
+
+/*
+ * CH_TIME_CFG: count as channel busy
+ */
+#define CH_TIME_CFG     	        0x110c
+
+/*
+ * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
+ */
+#define PBF_LIFE_TIMER     	        0x1110
+
+/*
+ * BCN_TIME_CFG:
+ * BEACON_INTERVAL: in unit of 1/16 TU
+ * TSF_TICKING: Enable TSF auto counting
+ * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
+ * BEACON_GEN: Enable beacon generator
+ */
+#define BCN_TIME_CFG			0x1114
+#define BCN_TIME_CFG_BEACON_INTERVAL	FIELD32(0x0000ffff)
+#define BCN_TIME_CFG_TSF_TICKING	FIELD32(0x00010000)
+#define BCN_TIME_CFG_TSF_SYNC		FIELD32(0x00060000)
+#define BCN_TIME_CFG_TBTT_ENABLE	FIELD32(0x00080000)
+#define BCN_TIME_CFG_BEACON_GEN		FIELD32(0x00100000)
+#define BCN_TIME_CFG_TX_TIME_COMPENSATE	FIELD32(0xf0000000)
+
+/*
+ * TBTT_SYNC_CFG:
+ */
+#define TBTT_SYNC_CFG			0x1118
+
+/*
+ * TSF_TIMER_DW0: Local lsb TSF timer, read-only
+ */
+#define TSF_TIMER_DW0			0x111c
+#define TSF_TIMER_DW0_LOW_WORD		FIELD32(0xffffffff)
+
+/*
+ * TSF_TIMER_DW1: Local msb TSF timer, read-only
+ */
+#define TSF_TIMER_DW1			0x1120
+#define TSF_TIMER_DW1_HIGH_WORD		FIELD32(0xffffffff)
+
+/*
+ * TBTT_TIMER: Timer remaining until next TBTT, read-only
+ */
+#define TBTT_TIMER			0x1124
+
+/*
+ * INT_TIMER_CFG:
+ */
+#define INT_TIMER_CFG			0x1128
+
+/*
+ * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
+ */
+#define INT_TIMER_EN			0x112c
+
+/*
+ * CH_IDLE_STA: channel idle time
+ */
+#define CH_IDLE_STA			0x1130
+
+/*
+ * CH_BUSY_STA: channel busy time
+ */
+#define CH_BUSY_STA			0x1134
+
+/*
+ * MAC_STATUS_CFG:
+ * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
+ *	If 1 or higher, one of the 2 registers is busy.
+ */
+#define MAC_STATUS_CFG			0x1200
+#define MAC_STATUS_CFG_BBP_RF_BUSY	FIELD32(0x00000003)
+
+/*
+ * PWR_PIN_CFG:
+ */
+#define PWR_PIN_CFG			0x1204
+
+/*
+ * AUTOWAKEUP_CFG: Manual power control / status register
+ * TBCN_BEFORE_WAKE: ForceWake has higher privilege than PutToSleep when both are set
+ * AUTOWAKE: 0:sleep, 1:awake
+ */
+#define AUTOWAKEUP_CFG			0x1208
+#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME	FIELD32(0x000000ff)
+#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE	FIELD32(0x00007f00)
+#define AUTOWAKEUP_CFG_AUTOWAKE		FIELD32(0x00008000)
+
+/*
+ * EDCA_AC0_CFG:
+ */
+#define EDCA_AC0_CFG			0x1300
+#define EDCA_AC0_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC0_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC0_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC0_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC1_CFG:
+ */
+#define EDCA_AC1_CFG			0x1304
+#define EDCA_AC1_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC1_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC1_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC1_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC2_CFG:
+ */
+#define EDCA_AC2_CFG			0x1308
+#define EDCA_AC2_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC2_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC2_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC2_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC3_CFG:
+ */
+#define EDCA_AC3_CFG			0x130c
+#define EDCA_AC3_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC3_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC3_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC3_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_TID_AC_MAP:
+ */
+#define EDCA_TID_AC_MAP			0x1310
+
+/*
+ * TX_PWR_CFG_0:
+ */
+#define TX_PWR_CFG_0			0x1314
+#define TX_PWR_CFG_0_1MBS		FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_2MBS		FIELD32(0x000000f0)
+#define TX_PWR_CFG_0_55MBS		FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_11MBS		FIELD32(0x0000f000)
+#define TX_PWR_CFG_0_6MBS		FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_9MBS		FIELD32(0x00f00000)
+#define TX_PWR_CFG_0_12MBS		FIELD32(0x0f000000)
+#define TX_PWR_CFG_0_18MBS		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_1:
+ */
+#define TX_PWR_CFG_1			0x1318
+#define TX_PWR_CFG_1_24MBS		FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_36MBS		FIELD32(0x000000f0)
+#define TX_PWR_CFG_1_48MBS		FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_54MBS		FIELD32(0x0000f000)
+#define TX_PWR_CFG_1_MCS0		FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_MCS1		FIELD32(0x00f00000)
+#define TX_PWR_CFG_1_MCS2		FIELD32(0x0f000000)
+#define TX_PWR_CFG_1_MCS3		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_2:
+ */
+#define TX_PWR_CFG_2			0x131c
+#define TX_PWR_CFG_2_MCS4		FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_MCS5		FIELD32(0x000000f0)
+#define TX_PWR_CFG_2_MCS6		FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_MCS7		FIELD32(0x0000f000)
+#define TX_PWR_CFG_2_MCS8		FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_MCS9		FIELD32(0x00f00000)
+#define TX_PWR_CFG_2_MCS10		FIELD32(0x0f000000)
+#define TX_PWR_CFG_2_MCS11		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_3:
+ */
+#define TX_PWR_CFG_3			0x1320
+#define TX_PWR_CFG_3_MCS12		FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_MCS13		FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_MCS14		FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_MCS15		FIELD32(0x0000f000)
+#define TX_PWR_CFG_3_UKNOWN1		FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_UKNOWN2		FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_UKNOWN3		FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_UKNOWN4		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_4:
+ */
+#define TX_PWR_CFG_4			0x1324
+#define TX_PWR_CFG_4_UKNOWN5		FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_UKNOWN6		FIELD32(0x000000f0)
+#define TX_PWR_CFG_4_UKNOWN7		FIELD32(0x00000f00)
+#define TX_PWR_CFG_4_UKNOWN8		FIELD32(0x0000f000)
+
+/*
+ * TX_PIN_CFG:
+ */
+#define TX_PIN_CFG			0x1328
+#define TX_PIN_CFG_PA_PE_A0_EN		FIELD32(0x00000001)
+#define TX_PIN_CFG_PA_PE_G0_EN		FIELD32(0x00000002)
+#define TX_PIN_CFG_PA_PE_A1_EN		FIELD32(0x00000004)
+#define TX_PIN_CFG_PA_PE_G1_EN		FIELD32(0x00000008)
+#define TX_PIN_CFG_PA_PE_A0_POL		FIELD32(0x00000010)
+#define TX_PIN_CFG_PA_PE_G0_POL		FIELD32(0x00000020)
+#define TX_PIN_CFG_PA_PE_A1_POL		FIELD32(0x00000040)
+#define TX_PIN_CFG_PA_PE_G1_POL		FIELD32(0x00000080)
+#define TX_PIN_CFG_LNA_PE_A0_EN		FIELD32(0x00000100)
+#define TX_PIN_CFG_LNA_PE_G0_EN		FIELD32(0x00000200)
+#define TX_PIN_CFG_LNA_PE_A1_EN		FIELD32(0x00000400)
+#define TX_PIN_CFG_LNA_PE_G1_EN		FIELD32(0x00000800)
+#define TX_PIN_CFG_LNA_PE_A0_POL	FIELD32(0x00001000)
+#define TX_PIN_CFG_LNA_PE_G0_POL	FIELD32(0x00002000)
+#define TX_PIN_CFG_LNA_PE_A1_POL	FIELD32(0x00004000)
+#define TX_PIN_CFG_LNA_PE_G1_POL	FIELD32(0x00008000)
+#define TX_PIN_CFG_RFTR_EN		FIELD32(0x00010000)
+#define TX_PIN_CFG_RFTR_POL		FIELD32(0x00020000)
+#define TX_PIN_CFG_TRSW_EN		FIELD32(0x00040000)
+#define TX_PIN_CFG_TRSW_POL		FIELD32(0x00080000)
+
+/*
+ * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
+ */
+#define TX_BAND_CFG			0x132c
+#define TX_BAND_CFG_HT40_PLUS		FIELD32(0x00000001)
+#define TX_BAND_CFG_A			FIELD32(0x00000002)
+#define TX_BAND_CFG_BG			FIELD32(0x00000004)
+
+/*
+ * TX_SW_CFG0:
+ */
+#define TX_SW_CFG0			0x1330
+
+/*
+ * TX_SW_CFG1:
+ */
+#define TX_SW_CFG1			0x1334
+
+/*
+ * TX_SW_CFG2:
+ */
+#define TX_SW_CFG2			0x1338
+
+/*
+ * TXOP_THRES_CFG:
+ */
+#define TXOP_THRES_CFG			0x133c
+
+/*
+ * TXOP_CTRL_CFG:
+ */
+#define TXOP_CTRL_CFG			0x1340
+
+/*
+ * TX_RTS_CFG:
+ * RTS_THRES: unit:byte
+ * RTS_FBK_EN: enable rts rate fallback
+ */
+#define TX_RTS_CFG			0x1344
+#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT	FIELD32(0x000000ff)
+#define TX_RTS_CFG_RTS_THRES		FIELD32(0x00ffff00)
+#define TX_RTS_CFG_RTS_FBK_EN		FIELD32(0x01000000)
+
+/*
+ * TX_TIMEOUT_CFG:
+ * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
+ * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
+ * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
+ *                it is recommended that:
+ *                (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
+ */
+#define TX_TIMEOUT_CFG			0x1348
+#define TX_TIMEOUT_CFG_MPDU_LIFETIME	FIELD32(0x000000f0)
+#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT	FIELD32(0x0000ff00)
+#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT	FIELD32(0x00ff0000)
+
+/*
+ * TX_RTY_CFG:
+ * SHORT_RTY_LIMIT: short retry limit
+ * LONG_RTY_LIMIT: long retry limit
+ * LONG_RTY_THRE: Long retry threshold
+ * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
+ *                   0:expired by retry limit, 1: expired by mpdu life timer
+ * AGG_RTY_MODE: Aggregate MPDU retry mode
+ *               0:expired by retry limit, 1: expired by mpdu life timer
+ * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
+ */
+#define TX_RTY_CFG			0x134c
+#define TX_RTY_CFG_SHORT_RTY_LIMIT	FIELD32(0x000000ff)
+#define TX_RTY_CFG_LONG_RTY_LIMIT	FIELD32(0x0000ff00)
+#define TX_RTY_CFG_LONG_RTY_THRE	FIELD32(0x0fff0000)
+#define TX_RTY_CFG_NON_AGG_RTY_MODE	FIELD32(0x10000000)
+#define TX_RTY_CFG_AGG_RTY_MODE		FIELD32(0x20000000)
+#define TX_RTY_CFG_TX_AUTO_FB_ENABLE	FIELD32(0x40000000)
+
+/*
+ * TX_LINK_CFG:
+ * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
+ * MFB_ENABLE: TX apply remote MFB 1:enable
+ * REMOTE_UMFS_ENABLE: remote unsolicited MFB enable
+ *                     0: do not apply remote unsolicited (MFS=7)
+ * TX_MRQ_EN: MCS request TX enable
+ * TX_RDG_EN: RDG TX enable
+ * TX_CF_ACK_EN: Piggyback CF-ACK enable
+ * REMOTE_MFB: remote MCS feedback
+ * REMOTE_MFS: remote MCS feedback sequence number
+ */
+#define TX_LINK_CFG			0x1350
+#define TX_LINK_CFG_REMOTE_MFB_LIFETIME	FIELD32(0x000000ff)
+#define TX_LINK_CFG_MFB_ENABLE		FIELD32(0x00000100)
+#define TX_LINK_CFG_REMOTE_UMFS_ENABLE	FIELD32(0x00000200)
+#define TX_LINK_CFG_TX_MRQ_EN		FIELD32(0x00000400)
+#define TX_LINK_CFG_TX_RDG_EN		FIELD32(0x00000800)
+#define TX_LINK_CFG_TX_CF_ACK_EN	FIELD32(0x00001000)
+#define TX_LINK_CFG_REMOTE_MFB		FIELD32(0x00ff0000)
+#define TX_LINK_CFG_REMOTE_MFS		FIELD32(0xff000000)
+
+/*
+ * HT_FBK_CFG0:
+ */
+#define HT_FBK_CFG0			0x1354
+#define HT_FBK_CFG0_HTMCS0FBK		FIELD32(0x0000000f)
+#define HT_FBK_CFG0_HTMCS1FBK		FIELD32(0x000000f0)
+#define HT_FBK_CFG0_HTMCS2FBK		FIELD32(0x00000f00)
+#define HT_FBK_CFG0_HTMCS3FBK		FIELD32(0x0000f000)
+#define HT_FBK_CFG0_HTMCS4FBK		FIELD32(0x000f0000)
+#define HT_FBK_CFG0_HTMCS5FBK		FIELD32(0x00f00000)
+#define HT_FBK_CFG0_HTMCS6FBK		FIELD32(0x0f000000)
+#define HT_FBK_CFG0_HTMCS7FBK		FIELD32(0xf0000000)
+
+/*
+ * HT_FBK_CFG1:
+ */
+#define HT_FBK_CFG1			0x1358
+#define HT_FBK_CFG1_HTMCS8FBK		FIELD32(0x0000000f)
+#define HT_FBK_CFG1_HTMCS9FBK		FIELD32(0x000000f0)
+#define HT_FBK_CFG1_HTMCS10FBK		FIELD32(0x00000f00)
+#define HT_FBK_CFG1_HTMCS11FBK		FIELD32(0x0000f000)
+#define HT_FBK_CFG1_HTMCS12FBK		FIELD32(0x000f0000)
+#define HT_FBK_CFG1_HTMCS13FBK		FIELD32(0x00f00000)
+#define HT_FBK_CFG1_HTMCS14FBK		FIELD32(0x0f000000)
+#define HT_FBK_CFG1_HTMCS15FBK		FIELD32(0xf0000000)
+
+/*
+ * LG_FBK_CFG0:
+ */
+#define LG_FBK_CFG0			0x135c
+#define LG_FBK_CFG0_OFDMMCS0FBK		FIELD32(0x0000000f)
+#define LG_FBK_CFG0_OFDMMCS1FBK		FIELD32(0x000000f0)
+#define LG_FBK_CFG0_OFDMMCS2FBK		FIELD32(0x00000f00)
+#define LG_FBK_CFG0_OFDMMCS3FBK		FIELD32(0x0000f000)
+#define LG_FBK_CFG0_OFDMMCS4FBK		FIELD32(0x000f0000)
+#define LG_FBK_CFG0_OFDMMCS5FBK		FIELD32(0x00f00000)
+#define LG_FBK_CFG0_OFDMMCS6FBK		FIELD32(0x0f000000)
+#define LG_FBK_CFG0_OFDMMCS7FBK		FIELD32(0xf0000000)
+
+/*
+ * LG_FBK_CFG1:
+ */
+#define LG_FBK_CFG1			0x1360
+#define LG_FBK_CFG0_CCKMCS0FBK		FIELD32(0x0000000f)
+#define LG_FBK_CFG0_CCKMCS1FBK		FIELD32(0x000000f0)
+#define LG_FBK_CFG0_CCKMCS2FBK		FIELD32(0x00000f00)
+#define LG_FBK_CFG0_CCKMCS3FBK		FIELD32(0x0000f000)
+
+/*
+ * CCK_PROT_CFG: CCK Protection
+ * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
+ * PROTECT_CTRL: Protection control frame type for CCK TX
+ *               0:none, 1:RTS/CTS, 2:CTS-to-self
+ * PROTECT_NAV: TXOP protection type for CCK TX
+ *              0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_OFDM: OFDM TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_MM20: MM20 TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_MM40: MM40 TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_GF20: GF20 TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_GF40: GF40 TXOP allowance, 0:disallow
+ * RTS_TH_EN: RTS threshold enable on CCK TX
+ */
+#define CCK_PROT_CFG			0x1364
+#define CCK_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define CCK_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define CCK_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define CCK_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * OFDM_PROT_CFG: OFDM Protection
+ */
+#define OFDM_PROT_CFG			0x1368
+#define OFDM_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define OFDM_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define OFDM_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define OFDM_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * MM20_PROT_CFG: MM20 Protection
+ */
+#define MM20_PROT_CFG			0x136c
+#define MM20_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define MM20_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define MM20_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define MM20_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * MM40_PROT_CFG: MM40 Protection
+ */
+#define MM40_PROT_CFG			0x1370
+#define MM40_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define MM40_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define MM40_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define MM40_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * GF20_PROT_CFG: GF20 Protection
+ */
+#define GF20_PROT_CFG			0x1374
+#define GF20_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define GF20_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define GF20_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define GF20_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * GF40_PROT_CFG: GF40 Protection
+ */
+#define GF40_PROT_CFG			0x1378
+#define GF40_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define GF40_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define GF40_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define GF40_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * EXP_CTS_TIME:
+ */
+#define EXP_CTS_TIME			0x137c
+
+/*
+ * EXP_ACK_TIME:
+ */
+#define EXP_ACK_TIME			0x1380
+
+/*
+ * RX_FILTER_CFG: RX configuration register.
+ */
+#define RX_FILTER_CFG			0x1400
+#define RX_FILTER_CFG_DROP_CRC_ERROR	FIELD32(0x00000001)
+#define RX_FILTER_CFG_DROP_PHY_ERROR	FIELD32(0x00000002)
+#define RX_FILTER_CFG_DROP_NOT_TO_ME	FIELD32(0x00000004)
+#define RX_FILTER_CFG_DROP_NOT_MY_BSSD	FIELD32(0x00000008)
+#define RX_FILTER_CFG_DROP_VER_ERROR	FIELD32(0x00000010)
+#define RX_FILTER_CFG_DROP_MULTICAST	FIELD32(0x00000020)
+#define RX_FILTER_CFG_DROP_BROADCAST	FIELD32(0x00000040)
+#define RX_FILTER_CFG_DROP_DUPLICATE	FIELD32(0x00000080)
+#define RX_FILTER_CFG_DROP_CF_END_ACK	FIELD32(0x00000100)
+#define RX_FILTER_CFG_DROP_CF_END	FIELD32(0x00000200)
+#define RX_FILTER_CFG_DROP_ACK		FIELD32(0x00000400)
+#define RX_FILTER_CFG_DROP_CTS		FIELD32(0x00000800)
+#define RX_FILTER_CFG_DROP_RTS		FIELD32(0x00001000)
+#define RX_FILTER_CFG_DROP_PSPOLL	FIELD32(0x00002000)
+#define RX_FILTER_CFG_DROP_BA		FIELD32(0x00004000)
+#define RX_FILTER_CFG_DROP_BAR		FIELD32(0x00008000)
+#define RX_FILTER_CFG_DROP_CNTL		FIELD32(0x00010000)
+
+/*
+ * AUTO_RSP_CFG:
+ * AUTORESPONDER: 0: disable, 1: enable
+ * BAC_ACK_POLICY: 0:long, 1:short preamble
+ * CTS_40_MMODE: Response CTS 40MHz duplicate mode
+ * CTS_40_MREF: Response CTS 40MHz duplicate mode
+ * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
+ * DUAL_CTS_EN: Dual CTS enable
+ * ACK_CTS_PSM_BIT: Power bit value in control frame
+ */
+#define AUTO_RSP_CFG			0x1404
+#define AUTO_RSP_CFG_AUTORESPONDER	FIELD32(0x00000001)
+#define AUTO_RSP_CFG_BAC_ACK_POLICY	FIELD32(0x00000002)
+#define AUTO_RSP_CFG_CTS_40_MMODE	FIELD32(0x00000004)
+#define AUTO_RSP_CFG_CTS_40_MREF	FIELD32(0x00000008)
+#define AUTO_RSP_CFG_AR_PREAMBLE	FIELD32(0x00000010)
+#define AUTO_RSP_CFG_DUAL_CTS_EN	FIELD32(0x00000040)
+#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT	FIELD32(0x00000080)
+
+/*
+ * LEGACY_BASIC_RATE:
+ */
+#define LEGACY_BASIC_RATE		0x1408
+
+/*
+ * HT_BASIC_RATE:
+ */
+#define HT_BASIC_RATE			0x140c
+
+/*
+ * HT_CTRL_CFG:
+ */
+#define HT_CTRL_CFG			0x1410
+
+/*
+ * SIFS_COST_CFG:
+ */
+#define SIFS_COST_CFG			0x1414
+
+/*
+ * RX_PARSER_CFG:
+ * Set NAV for all received frames
+ */
+#define RX_PARSER_CFG			0x1418
+
+/*
+ * TX_SEC_CNT0:
+ */
+#define TX_SEC_CNT0			0x1500
+
+/*
+ * RX_SEC_CNT0:
+ */
+#define RX_SEC_CNT0			0x1504
+
+/*
+ * CCMP_FC_MUTE:
+ */
+#define CCMP_FC_MUTE			0x1508
+
+/*
+ * TXOP_HLDR_ADDR0:
+ */
+#define TXOP_HLDR_ADDR0			0x1600
+
+/*
+ * TXOP_HLDR_ADDR1:
+ */
+#define TXOP_HLDR_ADDR1			0x1604
+
+/*
+ * TXOP_HLDR_ET:
+ */
+#define TXOP_HLDR_ET			0x1608
+
+/*
+ * QOS_CFPOLL_RA_DW0:
+ */
+#define QOS_CFPOLL_RA_DW0		0x160c
+
+/*
+ * QOS_CFPOLL_RA_DW1:
+ */
+#define QOS_CFPOLL_RA_DW1		0x1610
+
+/*
+ * QOS_CFPOLL_QC:
+ */
+#define QOS_CFPOLL_QC			0x1614
+
+/*
+ * RX_STA_CNT0: RX PLCP error count & RX CRC error count
+ */
+#define RX_STA_CNT0			0x1700
+#define RX_STA_CNT0_CRC_ERR		FIELD32(0x0000ffff)
+#define RX_STA_CNT0_PHY_ERR		FIELD32(0xffff0000)
+
+/*
+ * RX_STA_CNT1: RX false CCA count & RX PLCP error count
+ */
+#define RX_STA_CNT1			0x1704
+#define RX_STA_CNT1_FALSE_CCA		FIELD32(0x0000ffff)
+#define RX_STA_CNT1_PLCP_ERR		FIELD32(0xffff0000)
+
+/*
+ * RX_STA_CNT2:
+ */
+#define RX_STA_CNT2			0x1708
+#define RX_STA_CNT2_RX_DUPLI_COUNT	FIELD32(0x0000ffff)
+#define RX_STA_CNT2_RX_FIFO_OVERFLOW	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT0: TX Beacon count
+ */
+#define TX_STA_CNT0			0x170c
+#define TX_STA_CNT0_TX_FAIL_COUNT	FIELD32(0x0000ffff)
+#define TX_STA_CNT0_TX_BEACON_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT1: TX success & retransmit count
+ */
+#define TX_STA_CNT1			0x1710
+#define TX_STA_CNT1_TX_SUCCESS		FIELD32(0x0000ffff)
+#define TX_STA_CNT1_TX_RETRANSMIT	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT2: TX zero-length & underflow count
+ */
+#define TX_STA_CNT2			0x1714
+#define TX_STA_CNT2_TX_ZERO_LEN_COUNT	FIELD32(0x0000ffff)
+#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_FIFO: TX Result for specific PID status fifo register
+ */
+#define TX_STA_FIFO			0x1718
+#define TX_STA_FIFO_VALID		FIELD32(0x00000001)
+#define TX_STA_FIFO_PID_TYPE		FIELD32(0x0000001e)
+#define TX_STA_FIFO_TX_SUCCESS		FIELD32(0x00000020)
+#define TX_STA_FIFO_TX_AGGRE		FIELD32(0x00000040)
+#define TX_STA_FIFO_TX_ACK_REQUIRED	FIELD32(0x00000080)
+#define TX_STA_FIFO_WCID		FIELD32(0x0000ff00)
+#define TX_STA_FIFO_SUCCESS_RATE	FIELD32(0xffff0000)
+#define TX_STA_FIFO_MCS			FIELD32(0x007f0000)
+#define TX_STA_FIFO_PHYMODE		FIELD32(0xc0000000)
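+
+/*
+ * Note that the MCS and PHYMODE fields above decode parts of the same
+ * 16-bit rate word covered by SUCCESS_RATE (masks 0x007f0000 and
+ * 0xc0000000 within 0xffff0000).
+ */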
+
+/*
+ * TX_AGG_CNT: Debug counter
+ */
+#define TX_AGG_CNT			0x171c
+#define TX_AGG_CNT_NON_AGG_TX_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT_AGG_TX_COUNT		FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT0:
+ */
+#define TX_AGG_CNT0			0x1720
+#define TX_AGG_CNT0_AGG_SIZE_1_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT0_AGG_SIZE_2_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT1:
+ */
+#define TX_AGG_CNT1			0x1724
+#define TX_AGG_CNT1_AGG_SIZE_3_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT1_AGG_SIZE_4_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT2:
+ */
+#define TX_AGG_CNT2			0x1728
+#define TX_AGG_CNT2_AGG_SIZE_5_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT2_AGG_SIZE_6_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT3:
+ */
+#define TX_AGG_CNT3			0x172c
+#define TX_AGG_CNT3_AGG_SIZE_7_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT3_AGG_SIZE_8_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT4:
+ */
+#define TX_AGG_CNT4			0x1730
+#define TX_AGG_CNT4_AGG_SIZE_9_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT4_AGG_SIZE_10_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT5:
+ */
+#define TX_AGG_CNT5			0x1734
+#define TX_AGG_CNT5_AGG_SIZE_11_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT5_AGG_SIZE_12_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT6:
+ */
+#define TX_AGG_CNT6			0x1738
+#define TX_AGG_CNT6_AGG_SIZE_13_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT6_AGG_SIZE_14_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT7:
+ */
+#define TX_AGG_CNT7			0x173c
+#define TX_AGG_CNT7_AGG_SIZE_15_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT7_AGG_SIZE_16_COUNT	FIELD32(0xffff0000)
+
+/*
+ * MPDU_DENSITY_CNT:
+ * TX_ZERO_DEL: TX zero length delimiter count
+ * RX_ZERO_DEL: RX zero length delimiter count
+ */
+#define MPDU_DENSITY_CNT		0x1740
+#define MPDU_DENSITY_CNT_TX_ZERO_DEL	FIELD32(0x0000ffff)
+#define MPDU_DENSITY_CNT_RX_ZERO_DEL	FIELD32(0xffff0000)
+
+/*
+ * Security key table memory.
+ * MAC_WCID_BASE: 8 bytes (only 6 bytes used) * 256 entries
+ * PAIRWISE_KEY_TABLE_BASE: 32 bytes * 256 entries
+ * MAC_IVEIV_TABLE_BASE: 8 bytes * 256 entries
+ * MAC_WCID_ATTRIBUTE_BASE: 4 bytes * 256 entries
+ * SHARED_KEY_TABLE_BASE: 32 bytes * 16 entries
+ * SHARED_KEY_MODE_BASE: 4 bytes * 16 entries
+ */
+#define MAC_WCID_BASE			0x1800
+#define PAIRWISE_KEY_TABLE_BASE		0x4000
+#define MAC_IVEIV_TABLE_BASE		0x6000
+#define MAC_WCID_ATTRIBUTE_BASE		0x6800
+#define SHARED_KEY_TABLE_BASE		0x6c00
+#define SHARED_KEY_MODE_BASE		0x7000
+
+#define MAC_WCID_ENTRY(__idx) \
+	( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
+#define PAIRWISE_KEY_ENTRY(__idx) \
+	( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+#define MAC_IVEIV_ENTRY(__idx) \
+	( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
+#define MAC_WCID_ATTR_ENTRY(__idx) \
+	( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
+#define SHARED_KEY_ENTRY(__idx) \
+	( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+#define SHARED_KEY_MODE_ENTRY(__idx) \
+	( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
+
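+/*
+ * For example, with the 32-byte struct hw_key_entry below,
+ * PAIRWISE_KEY_ENTRY(2) resolves to 0x4000 + 2 * 32 = 0x4040.
+ */
+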
+struct mac_wcid_entry {
+	u8 mac[6];
+	u8 reserved[2];
+} __attribute__ ((packed));
+
+struct hw_key_entry {
+	u8 key[16];
+	u8 tx_mic[8];
+	u8 rx_mic[8];
+} __attribute__ ((packed));
+
+struct mac_iveiv_entry {
+	u8 iv[8];
+} __attribute__ ((packed));
+
+/*
+ * MAC_WCID_ATTRIBUTE:
+ */
+#define MAC_WCID_ATTRIBUTE_KEYTAB	FIELD32(0x00000001)
+#define MAC_WCID_ATTRIBUTE_CIPHER	FIELD32(0x0000000e)
+#define MAC_WCID_ATTRIBUTE_BSS_IDX	FIELD32(0x00000070)
+#define MAC_WCID_ATTRIBUTE_RX_WIUDF	FIELD32(0x00000380)
+
+/*
+ * SHARED_KEY_MODE:
+ */
+#define SHARED_KEY_MODE_BSS0_KEY0	FIELD32(0x00000007)
+#define SHARED_KEY_MODE_BSS0_KEY1	FIELD32(0x00000070)
+#define SHARED_KEY_MODE_BSS0_KEY2	FIELD32(0x00000700)
+#define SHARED_KEY_MODE_BSS0_KEY3	FIELD32(0x00007000)
+#define SHARED_KEY_MODE_BSS1_KEY0	FIELD32(0x00070000)
+#define SHARED_KEY_MODE_BSS1_KEY1	FIELD32(0x00700000)
+#define SHARED_KEY_MODE_BSS1_KEY2	FIELD32(0x07000000)
+#define SHARED_KEY_MODE_BSS1_KEY3	FIELD32(0x70000000)
+
+/*
+ * HOST-MCU communication
+ */
+
+/*
+ * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
+ */
+#define H2M_MAILBOX_CSR			0x7010
+#define H2M_MAILBOX_CSR_ARG0		FIELD32(0x000000ff)
+#define H2M_MAILBOX_CSR_ARG1		FIELD32(0x0000ff00)
+#define H2M_MAILBOX_CSR_CMD_TOKEN	FIELD32(0x00ff0000)
+#define H2M_MAILBOX_CSR_OWNER		FIELD32(0xff000000)
+
+/*
+ * H2M_MAILBOX_CID:
+ */
+#define H2M_MAILBOX_CID			0x7014
+#define H2M_MAILBOX_CID_CMD0		FIELD32(0x000000ff)
+#define H2M_MAILBOX_CID_CMD1		FIELD32(0x0000ff00)
+#define H2M_MAILBOX_CID_CMD2		FIELD32(0x00ff0000)
+#define H2M_MAILBOX_CID_CMD3		FIELD32(0xff000000)
+
+/*
+ * H2M_MAILBOX_STATUS:
+ */
+#define H2M_MAILBOX_STATUS		0x701c
+
+/*
+ * H2M_INT_SRC:
+ */
+#define H2M_INT_SRC			0x7024
+
+/*
+ * H2M_BBP_AGENT:
+ */
+#define H2M_BBP_AGENT			0x7028
+
+/*
+ * MCU_LEDCS: LED control for MCU Mailbox.
+ */
+#define MCU_LEDCS_LED_MODE		FIELD8(0x1f)
+#define MCU_LEDCS_POLARITY		FIELD8(0x01)
+
+/*
+ * HW_CS_CTS_BASE:
+ * Carrier-sense CTS frame base address.
+ * This is where the MAC stores the carrier-sense frame for the
+ * carrier-sense function.
+ */
+#define HW_CS_CTS_BASE			0x7700
+
+/*
+ * HW_DFS_CTS_BASE:
+ * DFS CTS frame base address. This is where the MAC stores the CTS frame for DFS.
+ */
+#define HW_DFS_CTS_BASE			0x7780
+
+/*
+ * TXRX control registers - base address 0x3000
+ */
+
+/*
+ * TXRX_CSR1:
+ * rt2860b: unknown register, uses R/O reg addr 0x77d0 first.
+ */
+#define TXRX_CSR1			0x77d0
+
+/*
+ * HW_DEBUG_SETTING_BASE:
+ * since a NULL frame won't be that long (256 bytes),
+ * we steal the 16 tail bytes to store debugging settings
+ */
+#define HW_DEBUG_SETTING_BASE		0x77f0
+#define HW_DEBUG_SETTING_BASE2		0x7770
+
+/*
+ * HW_BEACON_BASE
+ * In order to support a maximum of 8 MBSS, each with a maximum
+ * beacon length of 512 bytes,
+ * three discontinuous memory segments are used:
+ * 1. The original region for BCN 0~3
+ * 2. Memory taken from the FCE table for BCN 4~5
+ * 3. Memory taken from the pairwise key table for BCN 6~7
+ *    This occupies the memory of wcid 238~253 for BCN 6
+ *    and wcid 222~237 for BCN 7
+ *
+ * IMPORTANT NOTE: Not sure why legacy driver does this,
+ * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
+ */
+#define HW_BEACON_BASE0			0x7800
+#define HW_BEACON_BASE1			0x7a00
+#define HW_BEACON_BASE2			0x7c00
+#define HW_BEACON_BASE3			0x7e00
+#define HW_BEACON_BASE4			0x7200
+#define HW_BEACON_BASE5			0x7400
+#define HW_BEACON_BASE6			0x5dc0
+#define HW_BEACON_BASE7			0x5bc0
+
+#define HW_BEACON_OFFSET(__index) \
+	( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \
+	  (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \
+	  (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) )
+
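+/*
+ * For illustration, HW_BEACON_OFFSET() evaluates to 0x7800, 0x7a00,
+ * 0x7c00, 0x7e00, 0x7200, 0x7400, 0x5dc0 and 0x5bc0 for indices 0
+ * through 7, i.e. exactly the bases listed above.
+ */
+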
+/*
+ * BBP registers.
+ * The wordsize of the BBP is 8 bits.
+ */
+
+/*
+ * BBP 1: TX Antenna
+ */
+#define BBP1_TX_POWER			FIELD8(0x07)
+#define BBP1_TX_ANTENNA			FIELD8(0x18)
+
+/*
+ * BBP 3: RX Antenna
+ */
+#define BBP3_RX_ANTENNA			FIELD8(0x18)
+#define BBP3_HT40_PLUS			FIELD8(0x20)
+
+/*
+ * BBP 4: Bandwidth
+ */
+#define BBP4_TX_BF			FIELD8(0x01)
+#define BBP4_BANDWIDTH			FIELD8(0x18)
+
+/*
+ * RFCSR registers
+ * The wordsize of the RFCSR is 8 bits.
+ */
+
+/*
+ * RFCSR 6:
+ */
+#define RFCSR6_R			FIELD8(0x03)
+
+/*
+ * RFCSR 7:
+ */
+#define RFCSR7_RF_TUNING		FIELD8(0x01)
+
+/*
+ * RFCSR 12:
+ */
+#define RFCSR12_TX_POWER		FIELD8(0x1f)
+
+/*
+ * RFCSR 22:
+ */
+#define RFCSR22_BASEBAND_LOOPBACK	FIELD8(0x01)
+
+/*
+ * RFCSR 23:
+ */
+#define RFCSR23_FREQ_OFFSET		FIELD8(0x7f)
+
+/*
+ * RFCSR 30:
+ */
+#define RFCSR30_RF_CALIBRATION		FIELD8(0x80)
+
+/*
+ * RF registers
+ */
+
+/*
+ * RF 2
+ */
+#define RF2_ANTENNA_RX2			FIELD32(0x00000040)
+#define RF2_ANTENNA_TX1			FIELD32(0x00004000)
+#define RF2_ANTENNA_RX1			FIELD32(0x00020000)
+
+/*
+ * RF 3
+ */
+#define RF3_TXPOWER_G			FIELD32(0x00003e00)
+#define RF3_TXPOWER_A_7DBM_BOOST	FIELD32(0x00000200)
+#define RF3_TXPOWER_A			FIELD32(0x00003c00)
+
+/*
+ * RF 4
+ */
+#define RF4_TXPOWER_G			FIELD32(0x000007c0)
+#define RF4_TXPOWER_A_7DBM_BOOST	FIELD32(0x00000040)
+#define RF4_TXPOWER_A			FIELD32(0x00000780)
+#define RF4_FREQ_OFFSET			FIELD32(0x001f8000)
+#define RF4_HT40			FIELD32(0x00200000)
+
+/*
+ * EEPROM content.
+ * The wordsize of the EEPROM is 16 bits.
+ */
+
+/*
+ * EEPROM Version
+ */
+#define EEPROM_VERSION			0x0001
+#define EEPROM_VERSION_FAE		FIELD16(0x00ff)
+#define EEPROM_VERSION_VERSION		FIELD16(0xff00)
+
+/*
+ * HW MAC address.
+ */
+#define EEPROM_MAC_ADDR_0		0x0002
+#define EEPROM_MAC_ADDR_BYTE0		FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE1		FIELD16(0xff00)
+#define EEPROM_MAC_ADDR_1		0x0003
+#define EEPROM_MAC_ADDR_BYTE2		FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE3		FIELD16(0xff00)
+#define EEPROM_MAC_ADDR_2		0x0004
+#define EEPROM_MAC_ADDR_BYTE4		FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE5		FIELD16(0xff00)
+
+/*
+ * EEPROM ANTENNA config
+ * RXPATH: 1: 1R, 2: 2R, 3: 3R
+ * TXPATH: 1: 1T, 2: 2T
+ */
+#define	EEPROM_ANTENNA			0x001a
+#define EEPROM_ANTENNA_RXPATH		FIELD16(0x000f)
+#define EEPROM_ANTENNA_TXPATH		FIELD16(0x00f0)
+#define EEPROM_ANTENNA_RF_TYPE		FIELD16(0x0f00)
+
+/*
+ * EEPROM NIC config
+ * CARDBUS_ACCEL: 0 - enable, 1 - disable
+ */
+#define	EEPROM_NIC			0x001b
+#define EEPROM_NIC_HW_RADIO		FIELD16(0x0001)
+#define EEPROM_NIC_DYNAMIC_TX_AGC	FIELD16(0x0002)
+#define EEPROM_NIC_EXTERNAL_LNA_BG	FIELD16(0x0004)
+#define EEPROM_NIC_EXTERNAL_LNA_A	FIELD16(0x0008)
+#define EEPROM_NIC_CARDBUS_ACCEL	FIELD16(0x0010)
+#define EEPROM_NIC_BW40M_SB_BG		FIELD16(0x0020)
+#define EEPROM_NIC_BW40M_SB_A		FIELD16(0x0040)
+#define EEPROM_NIC_WPS_PBC		FIELD16(0x0080)
+#define EEPROM_NIC_BW40M_BG		FIELD16(0x0100)
+#define EEPROM_NIC_BW40M_A		FIELD16(0x0200)
+
+/*
+ * EEPROM frequency
+ */
+#define	EEPROM_FREQ			0x001d
+#define EEPROM_FREQ_OFFSET		FIELD16(0x00ff)
+#define EEPROM_FREQ_LED_MODE		FIELD16(0x7f00)
+#define EEPROM_FREQ_LED_POLARITY	FIELD16(0x1000)
+
+/*
+ * EEPROM LED
+ * POLARITY_RDY_G: Polarity RDY_G setting.
+ * POLARITY_RDY_A: Polarity RDY_A setting.
+ * POLARITY_ACT: Polarity ACT setting.
+ * POLARITY_GPIO_0: Polarity GPIO0 setting.
+ * POLARITY_GPIO_1: Polarity GPIO1 setting.
+ * POLARITY_GPIO_2: Polarity GPIO2 setting.
+ * POLARITY_GPIO_3: Polarity GPIO3 setting.
+ * POLARITY_GPIO_4: Polarity GPIO4 setting.
+ * LED_MODE: Led mode.
+ */
+#define EEPROM_LED1			0x001e
+#define EEPROM_LED2			0x001f
+#define EEPROM_LED3			0x0020
+#define EEPROM_LED_POLARITY_RDY_BG	FIELD16(0x0001)
+#define EEPROM_LED_POLARITY_RDY_A	FIELD16(0x0002)
+#define EEPROM_LED_POLARITY_ACT		FIELD16(0x0004)
+#define EEPROM_LED_POLARITY_GPIO_0	FIELD16(0x0008)
+#define EEPROM_LED_POLARITY_GPIO_1	FIELD16(0x0010)
+#define EEPROM_LED_POLARITY_GPIO_2	FIELD16(0x0020)
+#define EEPROM_LED_POLARITY_GPIO_3	FIELD16(0x0040)
+#define EEPROM_LED_POLARITY_GPIO_4	FIELD16(0x0080)
+#define EEPROM_LED_LED_MODE		FIELD16(0x1f00)
+
+/*
+ * EEPROM LNA
+ */
+#define EEPROM_LNA			0x0022
+#define EEPROM_LNA_BG			FIELD16(0x00ff)
+#define EEPROM_LNA_A0			FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI BG offset
+ */
+#define EEPROM_RSSI_BG			0x0023
+#define EEPROM_RSSI_BG_OFFSET0		FIELD16(0x00ff)
+#define EEPROM_RSSI_BG_OFFSET1		FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI BG2 offset
+ */
+#define EEPROM_RSSI_BG2			0x0024
+#define EEPROM_RSSI_BG2_OFFSET2		FIELD16(0x00ff)
+#define EEPROM_RSSI_BG2_LNA_A1		FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI A offset
+ */
+#define EEPROM_RSSI_A			0x0025
+#define EEPROM_RSSI_A_OFFSET0		FIELD16(0x00ff)
+#define EEPROM_RSSI_A_OFFSET1		FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI A2 offset
+ */
+#define EEPROM_RSSI_A2			0x0026
+#define EEPROM_RSSI_A2_OFFSET2		FIELD16(0x00ff)
+#define EEPROM_RSSI_A2_LNA_A2		FIELD16(0xff00)
+
+/*
+ * EEPROM TX power delta: 20MHz and 40MHz use different power.
+ *	This is the delta for 40MHz.
+ * VALUE: TX power delta value (MAX=4)
+ * TYPE: 1: add the delta value, 0: subtract the delta value
+ * TXPOWER: enable
+ */
+#define EEPROM_TXPOWER_DELTA		0x0028
+#define EEPROM_TXPOWER_DELTA_VALUE	FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE	FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_TXPOWER	FIELD16(0x0080)
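+
+/*
+ * For example, a delta VALUE of 2 with TYPE set to 1 means the 40MHz
+ * TX power is the 20MHz TX power plus 2 (presumably gated by the
+ * TXPOWER enable bit); with TYPE 0 it would be minus 2.
+ */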
+
+/*
+ * EEPROM TXPOWER 802.11BG
+ */
+#define	EEPROM_TXPOWER_BG1		0x0029
+#define	EEPROM_TXPOWER_BG2		0x0030
+#define EEPROM_TXPOWER_BG_SIZE		7
+#define EEPROM_TXPOWER_BG_1		FIELD16(0x00ff)
+#define EEPROM_TXPOWER_BG_2		FIELD16(0xff00)
+
+/*
+ * EEPROM TXPOWER 802.11A
+ */
+#define EEPROM_TXPOWER_A1		0x003c
+#define EEPROM_TXPOWER_A2		0x0053
+#define EEPROM_TXPOWER_A_SIZE		6
+#define EEPROM_TXPOWER_A_1		FIELD16(0x00ff)
+#define EEPROM_TXPOWER_A_2		FIELD16(0xff00)
+
+/*
+ * EEPROM TX power by rate: 20MHz power
+ */
+#define EEPROM_TXPOWER_BYRATE		0x006f
+
+/*
+ * EEPROM BBP.
+ */
+#define	EEPROM_BBP_START		0x0078
+#define EEPROM_BBP_SIZE			16
+#define EEPROM_BBP_VALUE		FIELD16(0x00ff)
+#define EEPROM_BBP_REG_ID		FIELD16(0xff00)
+
+/*
+ * MCU mailbox commands.
+ */
+#define MCU_SLEEP			0x30
+#define MCU_WAKEUP			0x31
+#define MCU_RADIO_OFF			0x35
+#define MCU_CURRENT			0x36
+#define MCU_LED				0x50
+#define MCU_LED_STRENGTH		0x51
+#define MCU_LED_1			0x52
+#define MCU_LED_2			0x53
+#define MCU_LED_3			0x54
+#define MCU_RADAR			0x60
+#define MCU_BOOT_SIGNAL			0x72
+#define MCU_BBP_SIGNAL			0x80
+#define MCU_POWER_SAVE			0x83
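+
+/*
+ * These opcodes are passed as the command argument to
+ * rt2800_mcu_request() (see e.g. the MCU_LED and MCU_LED_STRENGTH
+ * requests in rt2800lib.c).
+ */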
+
+/*
+ * MCU mailbox tokens
+ */
+#define TOKEN_WAKUP			3
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXWI_DESC_SIZE			( 4 * sizeof(__le32) )
+#define RXWI_DESC_SIZE			( 4 * sizeof(__le32) )
+
+/*
+ * TX WI structure
+ */
+
+/*
+ * Word0
+ * FRAG: 1: inform the TKIP engine that this is a fragment.
+ * MIMO_PS: the remote peer is in dynamic MIMO-PS mode
+ * TX_OP: 0: HT TXOP rule, 1: PIFS TX, 2: Backoff, 3: SIFS
+ * BW: channel bandwidth, 20MHz or 40MHz
+ * STBC: 1: STBC support for MCS 0-7, 2,3: reserved
+ */
+#define TXWI_W0_FRAG			FIELD32(0x00000001)
+#define TXWI_W0_MIMO_PS			FIELD32(0x00000002)
+#define TXWI_W0_CF_ACK			FIELD32(0x00000004)
+#define TXWI_W0_TS			FIELD32(0x00000008)
+#define TXWI_W0_AMPDU			FIELD32(0x00000010)
+#define TXWI_W0_MPDU_DENSITY		FIELD32(0x000000e0)
+#define TXWI_W0_TX_OP			FIELD32(0x00000300)
+#define TXWI_W0_MCS			FIELD32(0x007f0000)
+#define TXWI_W0_BW			FIELD32(0x00800000)
+#define TXWI_W0_SHORT_GI		FIELD32(0x01000000)
+#define TXWI_W0_STBC			FIELD32(0x06000000)
+#define TXWI_W0_IFS			FIELD32(0x08000000)
+#define TXWI_W0_PHYMODE			FIELD32(0xc0000000)
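+
+/*
+ * As an illustration, a 40MHz short-GI MCS7 HT frame would be described
+ * by setting TXWI_W0_BW and TXWI_W0_SHORT_GI to 1, TXWI_W0_MCS to 7 and
+ * an HT value in TXWI_W0_PHYMODE, e.g. via rt2x00_set_field32().
+ */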
+
+/*
+ * Word1
+ */
+#define TXWI_W1_ACK			FIELD32(0x00000001)
+#define TXWI_W1_NSEQ			FIELD32(0x00000002)
+#define TXWI_W1_BW_WIN_SIZE		FIELD32(0x000000fc)
+#define TXWI_W1_WIRELESS_CLI_ID		FIELD32(0x0000ff00)
+#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT	FIELD32(0x0fff0000)
+#define TXWI_W1_PACKETID		FIELD32(0xf0000000)
+
+/*
+ * Word2
+ */
+#define TXWI_W2_IV			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ */
+#define TXWI_W3_EIV			FIELD32(0xffffffff)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID		FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX		FIELD32(0x00000300)
+#define RXWI_W0_BSSID			FIELD32(0x00001c00)
+#define RXWI_W0_UDF			FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT	FIELD32(0x0fff0000)
+#define RXWI_W0_TID			FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG			FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE		FIELD32(0x0000fff0)
+#define RXWI_W1_MCS			FIELD32(0x007f0000)
+#define RXWI_W1_BW			FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI		FIELD32(0x01000000)
+#define RXWI_W1_STBC			FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE			FIELD32(0xc0000000)
+
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0			FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1			FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2			FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0			FIELD32(0x000000ff)
+#define RXWI_W3_SNR1			FIELD32(0x0000ff00)
+
+/*
+ * Macros for converting txpower from EEPROM to mac80211 value
+ * and from mac80211 value to register value.
+ */
+#define MIN_G_TXPOWER	0
+#define MIN_A_TXPOWER	-7
+#define MAX_G_TXPOWER	31
+#define MAX_A_TXPOWER	15
+#define DEFAULT_TXPOWER	5
+
+#define TXPOWER_G_FROM_DEV(__txpower) \
+	((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
+
+#define TXPOWER_G_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
+
+#define TXPOWER_A_FROM_DEV(__txpower) \
+	((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
+
+#define TXPOWER_A_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
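+
+/*
+ * For example, TXPOWER_A_FROM_DEV(20) yields DEFAULT_TXPOWER (5) since
+ * 20 exceeds MAX_A_TXPOWER, while TXPOWER_A_TO_DEV(-9) clamps to
+ * MIN_A_TXPOWER (-7).
+ */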
+
+#endif /* RT2800_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
new file mode 100644
index 000000000000..eb1e1d00bec3
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -0,0 +1,2284 @@
+/*
+	Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+	Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
+
+	Based on the original rt2800pci.c and rt2800usb.c.
+	  Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	  Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+	  Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+	  Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+	  Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+	  Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+	  Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+	  <http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2800lib
+	Abstract: rt2800 generic device routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "rt2x00.h"
+#ifdef CONFIG_RT2800USB
+#include "rt2x00usb.h"
+#endif
+#include "rt2800lib.h"
+#include "rt2800.h"
+#include "rt2800usb.h"
+
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
+MODULE_DESCRIPTION("rt2800 library");
+MODULE_LICENSE("GPL");
+
+/*
+ * Register access.
+ * All access to the CSR registers will go through the methods
+ * rt2800_register_read and rt2800_register_write.
+ * BBP and RF registers require indirect register access,
+ * and use the CSR registers BBPCSR and RFCSR to achieve this.
+ * These indirect registers work with busy bits,
+ * and we will try a maximum of REGISTER_BUSY_COUNT times to access
+ * the register, with a REGISTER_BUSY_DELAY us delay
+ * between each attempt. When the busy bit is still set at that point,
+ * the access attempt is considered to have failed,
+ * and we will print an error.
+ * The _lock versions must be used if you already hold the csr_mutex
+ */
+#define WAIT_FOR_BBP(__dev, __reg) \
+	rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RFCSR(__dev, __reg) \
+	rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RF(__dev, __reg) \
+	rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
+#define WAIT_FOR_MCU(__dev, __reg) \
+	rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
+			    H2M_MAILBOX_CSR_OWNER, (__reg))
+
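+/*
+ * Note: each of the helpers above polls the busy bit, so the worst-case
+ * stall per register access is roughly REGISTER_BUSY_COUNT *
+ * REGISTER_BUSY_DELAY microseconds.
+ */
+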
+static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
+			     const unsigned int word, const u8 value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the BBP becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
+		if (rt2x00_intf_is_pci(rt2x00dev))
+			rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+
+		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
+			    const unsigned int word, u8 *value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the BBP becomes available, afterwards we
+	 * can safely write the read request into the register.
+	 * After the request has been written, we wait until the hardware
+	 * returns the correct value. If at any time the register
+	 * doesn't become available in time, reg will be 0xffffffff,
+	 * which means we return 0xff to the caller.
+	 */
+	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
+		if (rt2x00_intf_is_pci(rt2x00dev))
+			rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+
+		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
+
+		WAIT_FOR_BBP(rt2x00dev, &reg);
+	}
+
+	*value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
+			       const unsigned int word, const u8 value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the RFCSR becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
+			      const unsigned int word, u8 *value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the RFCSR becomes available, afterwards we
+	 * can safely write the read request into the register.
+	 * After the request has been written, we wait until the hardware
+	 * returns the correct value. If at any time the register
+	 * doesn't become available in time, reg will be 0xffffffff,
+	 * which means we return 0xff to the caller.
+	 */
+	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+
+		WAIT_FOR_RFCSR(rt2x00dev, &reg);
+	}
+
+	*value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
+			    const unsigned int word, const u32 value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the RF becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_RF(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
+
+		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
+		rt2x00_rf_write(rt2x00dev, word, value);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
+			const u8 command, const u8 token,
+			const u8 arg0, const u8 arg1)
+{
+	u32 reg;
+
+	/*
+	 * RT2880 and RT3052 don't support MCU requests.
+	 */
+	if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
+	    rt2x00_rt(&rt2x00dev->chip, RT3052))
+		return;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the MCU becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
+		rt2800_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);
+
+		reg = 0;
+		rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
+		rt2800_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+EXPORT_SYMBOL_GPL(rt2800_mcu_request);
+
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+const struct rt2x00debug rt2800_rt2x00debug = {
+	.owner	= THIS_MODULE,
+	.csr	= {
+		.read		= rt2800_register_read,
+		.write		= rt2800_register_write,
+		.flags		= RT2X00DEBUGFS_OFFSET,
+		.word_base	= CSR_REG_BASE,
+		.word_size	= sizeof(u32),
+		.word_count	= CSR_REG_SIZE / sizeof(u32),
+	},
+	.eeprom	= {
+		.read		= rt2x00_eeprom_read,
+		.write		= rt2x00_eeprom_write,
+		.word_base	= EEPROM_BASE,
+		.word_size	= sizeof(u16),
+		.word_count	= EEPROM_SIZE / sizeof(u16),
+	},
+	.bbp	= {
+		.read		= rt2800_bbp_read,
+		.write		= rt2800_bbp_write,
+		.word_base	= BBP_BASE,
+		.word_size	= sizeof(u8),
+		.word_count	= BBP_SIZE / sizeof(u8),
+	},
+	.rf	= {
+		.read		= rt2x00_rf_read,
+		.write		= rt2800_rf_write,
+		.word_base	= RF_BASE,
+		.word_size	= sizeof(u32),
+		.word_count	= RF_SIZE / sizeof(u32),
+	},
+};
+EXPORT_SYMBOL_GPL(rt2800_rt2x00debug);
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+	return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+}
+EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
+
+#ifdef CONFIG_RT2X00_LIB_LEDS
+static void rt2800_brightness_set(struct led_classdev *led_cdev,
+				  enum led_brightness brightness)
+{
+	struct rt2x00_led *led =
+	    container_of(led_cdev, struct rt2x00_led, led_dev);
+	unsigned int enabled = brightness != LED_OFF;
+	unsigned int bg_mode =
+	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+	unsigned int polarity =
+		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
+				   EEPROM_FREQ_LED_POLARITY);
+	unsigned int ledmode =
+		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
+				   EEPROM_FREQ_LED_MODE);
+
+	if (led->type == LED_TYPE_RADIO) {
+		rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+				      enabled ? 0x20 : 0);
+	} else if (led->type == LED_TYPE_ASSOC) {
+		rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+				      enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
+	} else if (led->type == LED_TYPE_QUALITY) {
+		/*
+		 * The brightness is divided into 6 levels (0 - 5).
+		 * The specs tell us the following levels:
+		 *	0, 1, 3, 7, 15, 31
+		 * To determine the level in a simple way we can
+		 * work with bitshifting:
+		 *	(1 << level) - 1
+		 */
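+		/*
+		 * For example, a brightness around LED_FULL / 2 falls into
+		 * level 3, which gives a strength of (1 << 3) - 1 = 7.
+		 */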
+		rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
+				      (1 << brightness / (LED_FULL / 6)) - 1,
+				      polarity);
+	}
+}
+
+static int rt2800_blink_set(struct led_classdev *led_cdev,
+			    unsigned long *delay_on, unsigned long *delay_off)
+{
+	struct rt2x00_led *led =
+	    container_of(led_cdev, struct rt2x00_led, led_dev);
+	u32 reg;
+
+	rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
+	rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
+	rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
+	rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
+	rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
+	rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+	rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
+	rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
+	rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
+
+	return 0;
+}
+
+void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+		     struct rt2x00_led *led, enum led_type type)
+{
+	led->rt2x00dev = rt2x00dev;
+	led->type = type;
+	led->led_dev.brightness_set = rt2800_brightness_set;
+	led->led_dev.blink_set = rt2800_blink_set;
+	led->flags = LED_INITIALIZED;
+}
+EXPORT_SYMBOL_GPL(rt2800_init_led);
+#endif /* CONFIG_RT2X00_LIB_LEDS */
+
+/*
+ * Configuration handlers.
+ */
+static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
+				    struct rt2x00lib_crypto *crypto,
+				    struct ieee80211_key_conf *key)
+{
+	struct mac_wcid_entry wcid_entry;
+	struct mac_iveiv_entry iveiv_entry;
+	u32 offset;
+	u32 reg;
+
+	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
+
+	rt2800_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
+			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
+			   (crypto->cmd == SET_KEY) * crypto->cipher);
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
+			   (crypto->cmd == SET_KEY) * crypto->bssidx);
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
+	rt2800_register_write(rt2x00dev, offset, reg);
+
+	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+
+	memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+	if ((crypto->cipher == CIPHER_TKIP) ||
+	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+	    (crypto->cipher == CIPHER_AES))
+		iveiv_entry.iv[3] |= 0x20;
+	iveiv_entry.iv[3] |= key->keyidx << 6;
+	rt2800_register_multiwrite(rt2x00dev, offset,
+				      &iveiv_entry, sizeof(iveiv_entry));
+
+	offset = MAC_WCID_ENTRY(key->hw_key_idx);
+
+	memset(&wcid_entry, 0, sizeof(wcid_entry));
+	if (crypto->cmd == SET_KEY)
+		memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+	rt2800_register_multiwrite(rt2x00dev, offset,
+				      &wcid_entry, sizeof(wcid_entry));
+}
+
+int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
+			     struct rt2x00lib_crypto *crypto,
+			     struct ieee80211_key_conf *key)
+{
+	struct hw_key_entry key_entry;
+	struct rt2x00_field32 field;
+	u32 offset;
+	u32 reg;
+
+	if (crypto->cmd == SET_KEY) {
+		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;
+
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
+		rt2800_register_multiwrite(rt2x00dev, offset,
+					      &key_entry, sizeof(key_entry));
+	}
+
+	/*
+	 * The cipher types are stored over multiple registers
+	 * starting at SHARED_KEY_MODE_BASE. Each 32-bit word holds
+	 * the cipher types for the shared keys of 2 bssidx entries.
+	 * Using the individual field defines would cause overhead,
+	 * so just calculate the correct offset and bitmask directly.
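+	 * For example, hw_key_idx 10 ends up in the word at
+	 * SHARED_KEY_MODE_BASE + 4 = 0x7004, at bit offset 4 * (10 % 8) = 8.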
+	 */
+	field.bit_offset = 4 * (key->hw_key_idx % 8);
+	field.bit_mask = 0x7 << field.bit_offset;
+
+	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
+
+	rt2800_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, field,
+			   (crypto->cmd == SET_KEY) * crypto->cipher);
+	rt2800_register_write(rt2x00dev, offset, reg);
+
+	/*
+	 * Update WCID information
+	 */
+	rt2800_config_wcid_attr(rt2x00dev, crypto, key);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
+
+int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
+			       struct rt2x00lib_crypto *crypto,
+			       struct ieee80211_key_conf *key)
+{
+	struct hw_key_entry key_entry;
+	u32 offset;
+
+	if (crypto->cmd == SET_KEY) {
+		/*
+		 * 1 pairwise key is possible per AID, this means that the AID
+		 * equals our hw_key_idx. Make sure the WCID starts _after_ the
+		 * last possible shared key entry.
+		 */
+		if (crypto->aid > (256 - 32))
+			return -ENOSPC;
+
+		key->hw_key_idx = 32 + crypto->aid;
+
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
+		rt2800_register_multiwrite(rt2x00dev, offset,
+					      &key_entry, sizeof(key_entry));
+	}
+
+	/*
+	 * Update WCID information
+	 */
+	rt2800_config_wcid_attr(rt2x00dev, crypto, key);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
+
+void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
+			  const unsigned int filter_flags)
+{
+	u32 reg;
+
+	/*
+	 * Start configuration steps.
+	 * Note that the version error will always be dropped
+	 * and broadcast frames will always be accepted since
+	 * there is no filter for it at this time.
+	 */
+	rt2800_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
+			   !(filter_flags & FIF_FCSFAIL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
+			   !(filter_flags & FIF_PLCPFAIL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
+			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
+			   !(filter_flags & FIF_ALLMULTI));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
+			   !(filter_flags & FIF_PSPOLL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
+			   !(filter_flags & FIF_CONTROL));
+	rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_config_filter);
+
+void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
+			struct rt2x00intf_conf *conf, const unsigned int flags)
+{
+	unsigned int beacon_base;
+	u32 reg;
+
+	if (flags & CONFIG_UPDATE_TYPE) {
+		/*
+		 * Clear current synchronisation setup.
+		 * For the Beacon base registers we only need to clear
+		 * the first byte since that byte contains the VALID and OWNER
+		 * bits which (when set to 0) will invalidate the entire beacon.
+		 */
+		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
+		rt2800_register_write(rt2x00dev, beacon_base, 0);
+
+		/*
+		 * Enable synchronisation.
+		 */
+		rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
+				   (conf->sync == TSF_SYNC_BEACON));
+		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+	}
+
+	if (flags & CONFIG_UPDATE_MAC) {
+		reg = le32_to_cpu(conf->mac[1]);
+		rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
+		conf->mac[1] = cpu_to_le32(reg);
+
+		rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
+					      conf->mac, sizeof(conf->mac));
+	}
+
+	if (flags & CONFIG_UPDATE_BSSID) {
+		reg = le32_to_cpu(conf->bssid[1]);
+		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
+		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
+		conf->bssid[1] = cpu_to_le32(reg);
+
+		rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
+					      conf->bssid, sizeof(conf->bssid));
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800_config_intf);
+
+void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
+	rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
+			   !!erp->short_preamble);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
+			   !!erp->short_preamble);
+	rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
+			   erp->cts_protection ? 2 : 0);
+	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+	rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
+				 erp->basic_rates);
+	rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
+
+	rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
+	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
+	rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
+	rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+			   erp->beacon_int * 16);
+	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_config_erp);
+
+void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
+{
+	u8 r1;
+	u8 r3;
+
+	rt2800_bbp_read(rt2x00dev, 1, &r1);
+	rt2800_bbp_read(rt2x00dev, 3, &r3);
+
+	/*
+	 * Configure the TX antenna.
+	 */
+	switch ((int)ant->tx) {
+	case 1:
+		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
+		if (rt2x00_intf_is_pci(rt2x00dev))
+			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
+		break;
+	case 2:
+		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
+		break;
+	case 3:
+		/* Do nothing */
+		break;
+	}
+
+	/*
+	 * Configure the RX antenna.
+	 */
+	switch ((int)ant->rx) {
+	case 1:
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
+		break;
+	case 2:
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+		break;
+	case 3:
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
+		break;
+	}
+
+	rt2800_bbp_write(rt2x00dev, 3, r3);
+	rt2800_bbp_write(rt2x00dev, 1, r1);
+}
+EXPORT_SYMBOL_GPL(rt2800_config_ant);
+
+static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
+				   struct rt2x00lib_conf *libconf)
+{
+	u16 eeprom;
+	short lna_gain;
+
+	if (libconf->rf.channel <= 14) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
+	} else if (libconf->rf.channel <= 64) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
+	} else if (libconf->rf.channel <= 128) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
+	} else {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
+	}
+
+	rt2x00dev->lna_gain = lna_gain;
+}
+
+static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
+				       struct ieee80211_conf *conf,
+				       struct rf_channel *rf,
+				       struct channel_info *info)
+{
+	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
+
+	if (rt2x00dev->default_ant.tx == 1)
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
+
+	if (rt2x00dev->default_ant.rx == 1) {
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
+	} else if (rt2x00dev->default_ant.rx == 2)
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
+
+	if (rf->channel > 14) {
+		/*
+		 * When TX power is below 0, we should increase it by 7 to
+		 * make it a positive value (minimum value is -7).
+		 * However this means that values between 0 and 7 have a
+		 * double meaning, and we should set a 7dBm boost flag.
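+		 * For example, a requested TX power of -3 is programmed as 4
+		 * with the boost bit cleared, while +4 is programmed as 4
+		 * with the boost bit set.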
+		 */
+		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
+				   (info->tx_power1 >= 0));
+
+		if (info->tx_power1 < 0)
+			info->tx_power1 += 7;
+
+		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
+				   TXPOWER_A_TO_DEV(info->tx_power1));
+
+		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
+				   (info->tx_power2 >= 0));
+
+		if (info->tx_power2 < 0)
+			info->tx_power2 += 7;
+
+		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
+				   TXPOWER_A_TO_DEV(info->tx_power2));
+	} else {
+		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
+				   TXPOWER_G_TO_DEV(info->tx_power1));
+		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
+				   TXPOWER_G_TO_DEV(info->tx_power2));
+	}
+
+	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
+
+	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
+	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
+	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+	rt2800_rf_write(rt2x00dev, 4, rf->rf4);
+
+	udelay(200);
+
+	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
+	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
+	rt2800_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
+	rt2800_rf_write(rt2x00dev, 4, rf->rf4);
+
+	udelay(200);
+
+	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
+	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
+	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+	rt2800_rf_write(rt2x00dev, 4, rf->rf4);
+}
+
+static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
+				       struct ieee80211_conf *conf,
+				       struct rf_channel *rf,
+				       struct channel_info *info)
+{
+	u8 rfcsr;
+
+	rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
+	rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
+
+	rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
+	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+			  TXPOWER_G_TO_DEV(info->tx_power1));
+	rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
+	rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
+
+	rt2800_rfcsr_write(rt2x00dev, 24,
+			      rt2x00dev->calibration[conf_is_ht40(conf)]);
+
+	rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
+	rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
+}
+
+static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+				  struct ieee80211_conf *conf,
+				  struct rf_channel *rf,
+				  struct channel_info *info)
+{
+	u32 reg;
+	unsigned int tx_pin;
+	u8 bbp;
+
+	if ((rt2x00_rt(&rt2x00dev->chip, RT3070) ||
+	     rt2x00_rt(&rt2x00dev->chip, RT3090)) &&
+	    (rt2x00_rf(&rt2x00dev->chip, RF2020) ||
+	     rt2x00_rf(&rt2x00dev->chip, RF3020) ||
+	     rt2x00_rf(&rt2x00dev->chip, RF3021) ||
+	     rt2x00_rf(&rt2x00dev->chip, RF3022)))
+		rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
+	else
+		rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
+
+	/*
+	 * Change BBP settings
+	 */
+	rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+	rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+	rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+	rt2800_bbp_write(rt2x00dev, 86, 0);
+
+	if (rf->channel <= 14) {
+		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
+			rt2800_bbp_write(rt2x00dev, 82, 0x62);
+			rt2800_bbp_write(rt2x00dev, 75, 0x46);
+		} else {
+			rt2800_bbp_write(rt2x00dev, 82, 0x84);
+			rt2800_bbp_write(rt2x00dev, 75, 0x50);
+		}
+	} else {
+		rt2800_bbp_write(rt2x00dev, 82, 0xf2);
+
+		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
+			rt2800_bbp_write(rt2x00dev, 75, 0x46);
+		else
+			rt2800_bbp_write(rt2x00dev, 75, 0x50);
+	}
+
+	rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
+	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
+	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
+	rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
+
+	tx_pin = 0;
+
+	/* Turn on unused PA or LNA when not using 1T or 1R */
+	if (rt2x00dev->default_ant.tx != 1) {
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
+	}
+
+	/* Turn on unused PA or LNA when not using 1T or 1R */
+	if (rt2x00dev->default_ant.rx != 1) {
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+	}
+
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
+
+	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+
+	rt2800_bbp_read(rt2x00dev, 4, &bbp);
+	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
+	rt2800_bbp_write(rt2x00dev, 4, bbp);
+
+	rt2800_bbp_read(rt2x00dev, 3, &bbp);
+	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
+	rt2800_bbp_write(rt2x00dev, 3, bbp);
+
+	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+		if (conf_is_ht40(conf)) {
+			rt2800_bbp_write(rt2x00dev, 69, 0x1a);
+			rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+			rt2800_bbp_write(rt2x00dev, 73, 0x16);
+		} else {
+			rt2800_bbp_write(rt2x00dev, 69, 0x16);
+			rt2800_bbp_write(rt2x00dev, 70, 0x08);
+			rt2800_bbp_write(rt2x00dev, 73, 0x11);
+		}
+	}
+
+	msleep(1);
+}
+
+static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
+				  const int txpower)
+{
+	u32 reg;
+	u32 value = TXPOWER_G_TO_DEV(txpower);
+	u8 r1;
+
+	rt2800_bbp_read(rt2x00dev, 1, &r1);
+	rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+	rt2800_bbp_write(rt2x00dev, 1, r1);
+
+	rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
+	rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
+
+	rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
+	rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
+
+	rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
+	rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
+
+	rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
+	rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
+
+	rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
+	rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
+}
+
+static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
+				      struct rt2x00lib_conf *libconf)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
+			   libconf->conf->short_frame_max_tx_count);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
+			   libconf->conf->long_frame_max_tx_count);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
+	rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
+}
+
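+/*
+ * Power-save handling: when entering PS the autowakeup timer is programmed
+ * (based on the listen interval) before the device is put to sleep; when
+ * leaving PS the device is woken up first and the autowakeup configuration
+ * is cleared afterwards.
+ */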
+static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
+			     struct rt2x00lib_conf *libconf)
+{
+	enum dev_state state =
+	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
+		STATE_SLEEP : STATE_AWAKE;
+	u32 reg;
+
+	if (state == STATE_SLEEP) {
+		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);
+
+		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
+				   libconf->conf->listen_interval - 1);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
+		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+
+		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
+	} else {
+		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
+
+		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
+		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+	}
+}
+
+void rt2800_config(struct rt2x00_dev *rt2x00dev,
+		   struct rt2x00lib_conf *libconf,
+		   const unsigned int flags)
+{
+	/* Always recalculate LNA gain before changing configuration */
+	rt2800_config_lna_gain(rt2x00dev, libconf);
+
+	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+		rt2800_config_channel(rt2x00dev, libconf->conf,
+				      &libconf->rf, &libconf->channel);
+	if (flags & IEEE80211_CONF_CHANGE_POWER)
+		rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
+	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
+		rt2800_config_retry_limit(rt2x00dev, libconf);
+	if (flags & IEEE80211_CONF_CHANGE_PS)
+		rt2800_config_ps(rt2x00dev, libconf);
+}
+EXPORT_SYMBOL_GPL(rt2800_config);
+
+/*
+ * Link tuning
+ */
+void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
+{
+	u32 reg;
+
+	/*
+	 * Update FCS error count from register.
+	 */
+	rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
+}
+EXPORT_SYMBOL_GPL(rt2800_link_stats);
+
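+/*
+ * Default VGC level for BBP register 66: it depends on the current band,
+ * the LNA gain and, on 5GHz, whether an HT40 channel is configured. USB
+ * devices with the RT3070 revision use a different 2GHz baseline.
+ */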
+static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
+{
+	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+		if (rt2x00_intf_is_usb(rt2x00dev) &&
+		    rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
+			return 0x1c + (2 * rt2x00dev->lna_gain);
+		else
+			return 0x2e + rt2x00dev->lna_gain;
+	}
+
+	if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+		return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
+	else
+		return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+}
+
+static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
+				  struct link_qual *qual, u8 vgc_level)
+{
+	if (qual->vgc_level != vgc_level) {
+		rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+		qual->vgc_level = vgc_level;
+		qual->vgc_level_reg = vgc_level;
+	}
+}
+
+void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
+{
+	rt2800_set_vgc(rt2x00dev, qual, rt2800_get_default_vgc(rt2x00dev));
+}
+EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
+
+void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
+		       const u32 count)
+{
+	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+		return;
+
+	/*
+	 * When RSSI is better than -80, increase the VGC level by 0x10
+	 */
+	rt2800_set_vgc(rt2x00dev, qual,
+		       rt2800_get_default_vgc(rt2x00dev) +
+		       ((qual->rssi > -80) * 0x10));
+}
+EXPORT_SYMBOL_GPL(rt2800_link_tuner);
+
+/*
+ * Initialization functions.
+ */
+int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	unsigned int i;
+
+	if (rt2x00_intf_is_usb(rt2x00dev)) {
+		/*
+		 * Wait until BBP and RF are ready.
+		 */
+		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+			rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+			if (reg && reg != ~0)
+				break;
+			msleep(1);
+		}
+
+		if (i == REGISTER_BUSY_COUNT) {
+			ERROR(rt2x00dev, "Unstable hardware.\n");
+			return -EBUSY;
+		}
+
+		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+		rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
+				      reg & ~0x00002000);
+	} else if (rt2x00_intf_is_pci(rt2x00dev))
+		rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	if (rt2x00_intf_is_usb(rt2x00dev)) {
+		rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
+#ifdef CONFIG_RT2800USB
+		rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+					    USB_MODE_RESET, REGISTER_TIMEOUT);
+#endif
+	}
+
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
+	rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);
+
+	rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
+	rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);
+
+	rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
+	rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
+
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
+	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+	if (rt2x00_intf_is_usb(rt2x00dev) &&
+	    rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+		rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+	} else {
+		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
+		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+	}
+
+	rt2800_register_read(rt2x00dev, TX_LINK_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
+	rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
+	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
+	rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
+	if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
+	    rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
+	else
+		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+	rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+
+	rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
+
+	rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
+	rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+	rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+	rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
+
+	if (rt2x00_intf_is_usb(rt2x00dev)) {
+		rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
+
+		rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
+		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
+		rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	}
+
+	rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
+	rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
+
+	rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
+			   IEEE80211_MAX_RTS_THRESHOLD);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
+	rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
+
+	rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
+	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+	/*
+	 * The ASIC keeps garbage values after boot, so clear the encryption keys.
+	 */
+	for (i = 0; i < 4; i++)
+		rt2800_register_write(rt2x00dev,
+					 SHARED_KEY_MODE_ENTRY(i), 0);
+
+	for (i = 0; i < 256; i++) {
+		u32 wcid[2] = { 0xffffffff, 0x00ffffff };
+		rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
+					      wcid, sizeof(wcid));
+
+		rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
+		rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+	}
+
+	/*
+	 * Clear all beacons
+	 * For the Beacon base registers we only need to clear
+	 * the first byte since that byte contains the VALID and OWNER
+	 * bits which (when set to 0) will invalidate the entire beacon.
+	 */
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
+	rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
+
+	if (rt2x00_intf_is_usb(rt2x00dev)) {
+		rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
+		rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
+		rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
+	}
+
+	rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
+	rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg);
+
+	rt2800_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
+	rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg);
+
+	rt2800_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
+	rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg);
+
+	rt2800_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
+	rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg);
+
+	/*
+	 * We must clear the error counters.
+	 * These registers are cleared on read,
+	 * so we may pass a dummy variable to store the value.
+	 */
+	rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+	rt2800_register_read(rt2x00dev, RX_STA_CNT1, &reg);
+	rt2800_register_read(rt2x00dev, RX_STA_CNT2, &reg);
+	rt2800_register_read(rt2x00dev, TX_STA_CNT0, &reg);
+	rt2800_register_read(rt2x00dev, TX_STA_CNT1, &reg);
+	rt2800_register_read(rt2x00dev, TX_STA_CNT2, &reg);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_init_registers);
+
+static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u32 reg;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
+		if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
+			return 0;
+
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
+	return -EACCES;
+}
+
+static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u8 value;
+
+	/*
+	 * BBP was enabled after firmware was loaded,
+	 * but we need to reactivate it now.
+	 */
+	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+	msleep(1);
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_bbp_read(rt2x00dev, 0, &value);
+		if ((value != 0xff) && (value != 0x00))
+			return 0;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
+	return -EACCES;
+}
+
+int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u16 eeprom;
+	u8 reg_id;
+	u8 value;
+
+	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
+		     rt2800_wait_bbp_ready(rt2x00dev)))
+		return -EACCES;
+
+	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+	rt2800_bbp_write(rt2x00dev, 66, 0x38);
+	rt2800_bbp_write(rt2x00dev, 69, 0x12);
+	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+	rt2800_bbp_write(rt2x00dev, 73, 0x10);
+	rt2800_bbp_write(rt2x00dev, 81, 0x37);
+	rt2800_bbp_write(rt2x00dev, 82, 0x62);
+	rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+	rt2800_bbp_write(rt2x00dev, 84, 0x99);
+	rt2800_bbp_write(rt2x00dev, 86, 0x00);
+	rt2800_bbp_write(rt2x00dev, 91, 0x04);
+	rt2800_bbp_write(rt2x00dev, 92, 0x00);
+	rt2800_bbp_write(rt2x00dev, 103, 0x00);
+	rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+		rt2800_bbp_write(rt2x00dev, 69, 0x16);
+		rt2800_bbp_write(rt2x00dev, 73, 0x12);
+	}
+
+	if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
+		rt2800_bbp_write(rt2x00dev, 84, 0x19);
+
+	if (rt2x00_intf_is_usb(rt2x00dev) &&
+	    rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+		rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+		rt2800_bbp_write(rt2x00dev, 84, 0x99);
+		rt2800_bbp_write(rt2x00dev, 105, 0x05);
+	}
+
+	if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
+		rt2800_bbp_write(rt2x00dev, 31, 0x08);
+		rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+		rt2800_bbp_write(rt2x00dev, 80, 0x08);
+	}
+
+	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+
+		if (eeprom != 0xffff && eeprom != 0x0000) {
+			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
+			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
+			rt2800_bbp_write(rt2x00dev, reg_id, value);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_init_bbp);
+
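+/*
+ * RX filter calibration: with the RF in baseband loopback a test tone is
+ * generated and the passband and stopband responses are read back from
+ * BBP register 55. RFCSR 24 is incremented until the passband/stopband
+ * difference exceeds the filter target, stepping back one value if the
+ * target was ever matched exactly.
+ */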
+static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
+				bool bw40, u8 rfcsr24, u8 filter_target)
+{
+	unsigned int i;
+	u8 bbp;
+	u8 rfcsr;
+	u8 passband;
+	u8 stopband;
+	u8 overtuned = 0;
+
+	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
+
+	rt2800_bbp_read(rt2x00dev, 4, &bbp);
+	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
+	rt2800_bbp_write(rt2x00dev, 4, bbp);
+
+	rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
+	rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+	/*
+	 * Set power & frequency of passband test tone
+	 */
+	rt2800_bbp_write(rt2x00dev, 24, 0);
+
+	for (i = 0; i < 100; i++) {
+		rt2800_bbp_write(rt2x00dev, 25, 0x90);
+		msleep(1);
+
+		rt2800_bbp_read(rt2x00dev, 55, &passband);
+		if (passband)
+			break;
+	}
+
+	/*
+	 * Set power & frequency of stopband test tone
+	 */
+	rt2800_bbp_write(rt2x00dev, 24, 0x06);
+
+	for (i = 0; i < 100; i++) {
+		rt2800_bbp_write(rt2x00dev, 25, 0x90);
+		msleep(1);
+
+		rt2800_bbp_read(rt2x00dev, 55, &stopband);
+
+		if ((passband - stopband) <= filter_target) {
+			rfcsr24++;
+			overtuned += ((passband - stopband) == filter_target);
+		} else
+			break;
+
+		rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
+	}
+
+	rfcsr24 -= !!overtuned;
+
+	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
+	return rfcsr24;
+}
+
+int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+{
+	u8 rfcsr;
+	u8 bbp;
+
+	if (rt2x00_intf_is_usb(rt2x00dev) &&
+	    rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+		return 0;
+
+	if (rt2x00_intf_is_pci(rt2x00dev)) {
+		if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
+		    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
+		    !rt2x00_rf(&rt2x00dev->chip, RF3022))
+			return 0;
+	}
+
+	/*
+	 * Init RF calibration.
+	 */
+	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+	msleep(1);
+	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+	if (rt2x00_intf_is_usb(rt2x00dev)) {
+		rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+		rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
+		rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
+		rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
+		rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
+		rt2800_rfcsr_write(rt2x00dev, 10, 0x71);
+		rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+		rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
+		rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+		rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
+		rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
+		rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
+		rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
+		rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+		rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
+		rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
+		rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
+		rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+		rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+		rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
+	} else if (rt2x00_intf_is_pci(rt2x00dev)) {
+		rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
+		rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
+		rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
+		rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
+		rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+		rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
+		rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
+		rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
+		rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
+		rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
+		rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
+		rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+		rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
+		rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
+		rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+		rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
+		rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
+		rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
+		rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
+		rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+		rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
+		rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
+		rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+		rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
+		rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
+		rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+		rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
+		rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
+		rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
+		rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
+	}
+
+	/*
+	 * Set RX Filter calibration for 20MHz and 40MHz
+	 */
+	rt2x00dev->calibration[0] =
+	    rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
+	rt2x00dev->calibration[1] =
+	    rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+
+	/*
+	 * Set back to initial state
+	 */
+	rt2800_bbp_write(rt2x00dev, 24, 0);
+
+	rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+	rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+	/*
+	 * set BBP back to BW20
+	 */
+	rt2800_bbp_read(rt2x00dev, 4, &bbp);
+	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+	rt2800_bbp_write(rt2x00dev, 4, bbp);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
+
+int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg);
+
+	return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
+}
+EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
+
+static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, &reg);
+	rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
+	rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
+	rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
+	rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg);
+
+	/* Wait until the EEPROM has been loaded */
+	rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
+
+	/* Apparently the data is read from end to start */
+	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
+					(u32 *)&rt2x00dev->eeprom[i]);
+	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
+					(u32 *)&rt2x00dev->eeprom[i + 2]);
+	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
+					(u32 *)&rt2x00dev->eeprom[i + 4]);
+	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
+					(u32 *)&rt2x00dev->eeprom[i + 6]);
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+
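+	/*
+	 * Each rt2800_efuse_read() call fills four 32-bit EFUSE_DATA words,
+	 * i.e. eight 16-bit EEPROM words, hence the stride of 8.
+	 */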
+	for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8)
+		rt2800_efuse_read(rt2x00dev, i);
+}
+EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
+
+int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	u16 word;
+	u8 *mac;
+	u8 default_lna_gain;
+
+	/*
+	 * Start validation of the data that has been read.
+	 */
+	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
+	if (!is_valid_ether_addr(mac)) {
+		random_ether_addr(mac);
+		EEPROM(rt2x00dev, "MAC: %pM\n", mac);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+	if (word == 0xffff) {
+		rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
+		rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
+		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+		EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
+	} else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+		/*
+		 * There is a maximum of 2 RX streams for the RT28x0 series
+		 */
+		if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
+			rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+	if (word == 0xffff) {
+		rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
+		EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+	if ((word & 0x00ff) == 0x00ff) {
+		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
+		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
+				   LED_MODE_TXRX_ACTIVITY);
+		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
+		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+	}
+
+	/*
+	 * During LNA validation the LNA_A0 value is used as the reference.
+	 * Note that EEPROM_LNA itself is never validated.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+	default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
+
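+	/*
+	 * RSSI offsets outside the +/-10 range are considered bogus and are
+	 * reset to 0; invalid LNA values (0x00/0xff) fall back to the LNA_A0
+	 * gain read above.
+	 */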
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
+	if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
+	    rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
+				   default_lna_gain);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
+	if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
+	    rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
+				   default_lna_gain);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
+
+int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	u16 value;
+	u16 eeprom;
+
+	/*
+	 * Read EEPROM word for configuration.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+	/*
+	 * Identify RF chipset.
+	 */
+	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
+	rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+
+	rt2x00_set_chip_rf(rt2x00dev, value, reg);
+
+	if (rt2x00_intf_is_usb(rt2x00dev)) {
+		struct rt2x00_chip *chip = &rt2x00dev->chip;
+
+		/*
+		 * The check for rt2860 is not a typo, some rt2870 hardware
+		 * identifies itself as rt2860 in the CSR register.
+		 */
+		if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) ||
+		    rt2x00_check_rev(chip, 0xfff00000, 0x28700000) ||
+		    rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) {
+			rt2x00_set_chip_rt(rt2x00dev, RT2870);
+		} else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) {
+			rt2x00_set_chip_rt(rt2x00dev, RT3070);
+		} else {
+			ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
+			return -ENODEV;
+		}
+	}
+	rt2x00_print_chip(rt2x00dev);
+
+	if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
+		ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Identify default antenna configuration.
+	 */
+	rt2x00dev->default_ant.tx =
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
+	rt2x00dev->default_ant.rx =
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);
+
+	/*
+	 * Read frequency offset and RF programming sequence.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
+
+	/*
+	 * Read external LNA information.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
+		__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
+
+	/*
+	 * Detect if this device has a hardware controlled radio.
+	 */
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
+		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
+
+	/*
+	 * Store LED settings for correct LED behaviour.
+	 */
+#ifdef CONFIG_RT2X00_LIB_LEDS
+	rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
+	rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
+	rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
+#endif /* CONFIG_RT2X00_LIB_LEDS */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
+
+/*
+ * RF value list for rt28x0
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
+ */
+static const struct rf_channel rf_vals[] = {
+	{ 1,  0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
+	{ 2,  0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
+	{ 3,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
+	{ 4,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
+	{ 5,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
+	{ 6,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
+	{ 7,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
+	{ 8,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
+	{ 9,  0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
+	{ 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
+	{ 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
+	{ 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
+	{ 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
+	{ 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },
+
+	/* 802.11 UNI / HyperLan 2 */
+	{ 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
+	{ 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
+	{ 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
+	{ 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
+	{ 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
+	{ 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
+	{ 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
+	{ 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
+	{ 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
+	{ 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
+	{ 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
+	{ 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },
+
+	/* 802.11 HyperLan 2 */
+	{ 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
+	{ 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
+	{ 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
+	{ 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
+	{ 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
+	{ 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
+	{ 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
+	{ 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
+	{ 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
+	{ 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
+	{ 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
+	{ 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
+	{ 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
+	{ 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
+	{ 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
+	{ 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },
+
+	/* 802.11 UNII */
+	{ 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
+	{ 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
+	{ 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
+	{ 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
+	{ 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
+	{ 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
+	{ 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
+	{ 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f },
+	{ 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 },
+	{ 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 },
+	{ 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f },
+
+	/* 802.11 Japan */
+	{ 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
+	{ 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
+	{ 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
+	{ 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
+	{ 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
+	{ 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
+	{ 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
+};
+
+/*
+ * RF value list for rt3070
+ * Supports: 2.4 GHz
+ */
+static const struct rf_channel rf_vals_302x[] = {
+	{1,  241, 2, 2 },
+	{2,  241, 2, 7 },
+	{3,  242, 2, 2 },
+	{4,  242, 2, 7 },
+	{5,  243, 2, 2 },
+	{6,  243, 2, 7 },
+	{7,  244, 2, 2 },
+	{8,  244, 2, 7 },
+	{9,  245, 2, 2 },
+	{10, 245, 2, 7 },
+	{11, 246, 2, 2 },
+	{12, 246, 2, 7 },
+	{13, 247, 2, 2 },
+	{14, 248, 2, 4 },
+};
+
+int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+{
+	struct rt2x00_chip *chip = &rt2x00dev->chip;
+	struct hw_mode_spec *spec = &rt2x00dev->spec;
+	struct channel_info *info;
+	char *tx_power1;
+	char *tx_power2;
+	unsigned int i;
+	u16 eeprom;
+
+	/*
+	 * Initialize all hw fields.
+	 */
+	rt2x00dev->hw->flags =
+	    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+	    IEEE80211_HW_SIGNAL_DBM |
+	    IEEE80211_HW_SUPPORTS_PS |
+	    IEEE80211_HW_PS_NULLFUNC_STACK;
+
+	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
+	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
+				rt2x00_eeprom_addr(rt2x00dev,
+						   EEPROM_MAC_ADDR_0));
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+	/*
+	 * Initialize hw_mode information.
+	 */
+	spec->supported_bands = SUPPORT_BAND_2GHZ;
+	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
+
+	if (rt2x00_rf(chip, RF2820) ||
+	    rt2x00_rf(chip, RF2720) ||
+	    (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) {
+		spec->num_channels = 14;
+		spec->channels = rf_vals;
+	} else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) {
+		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		spec->num_channels = ARRAY_SIZE(rf_vals);
+		spec->channels = rf_vals;
+	} else if (rt2x00_rf(chip, RF3020) ||
+		   rt2x00_rf(chip, RF2020) ||
+		   rt2x00_rf(chip, RF3021) ||
+		   rt2x00_rf(chip, RF3022)) {
+		spec->num_channels = ARRAY_SIZE(rf_vals_302x);
+		spec->channels = rf_vals_302x;
+	}
+
+	/*
+	 * Initialize HT information.
+	 */
+	if (!rt2x00_rf(chip, RF2020))
+		spec->ht.ht_supported = true;
+	else
+		spec->ht.ht_supported = false;
+
+	spec->ht.cap =
+	    IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+	    IEEE80211_HT_CAP_GRN_FLD |
+	    IEEE80211_HT_CAP_SGI_20 |
+	    IEEE80211_HT_CAP_SGI_40 |
+	    IEEE80211_HT_CAP_TX_STBC |
+	    IEEE80211_HT_CAP_RX_STBC |
+	    IEEE80211_HT_CAP_PSMP_SUPPORT;
+	spec->ht.ampdu_factor = 3;
+	spec->ht.ampdu_density = 4;
+	spec->ht.mcs.tx_params =
+	    IEEE80211_HT_MCS_TX_DEFINED |
+	    IEEE80211_HT_MCS_TX_RX_DIFF |
+	    ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
+		IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+
+	switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
+	case 3:
+		spec->ht.mcs.rx_mask[2] = 0xff;
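+		/* fall through */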
+	case 2:
+		spec->ht.mcs.rx_mask[1] = 0xff;
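+		/* fall through */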
+	case 1:
+		spec->ht.mcs.rx_mask[0] = 0xff;
+		spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
+		break;
+	}
+
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spec->channels_info = info;
+
+	tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+	tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+
+	for (i = 0; i < 14; i++) {
+		info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
+		info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
+	}
+
+	if (spec->num_channels > 14) {
+		tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
+		tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+
+		for (i = 14; i < spec->num_channels; i++) {
+			info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
+			info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
+
+/*
+ * IEEE80211 stack callback functions.
+ */
+static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
+				u32 *iv32, u16 *iv16)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct mac_iveiv_entry iveiv_entry;
+	u32 offset;
+
+	offset = MAC_IVEIV_ENTRY(hw_key_idx);
+	rt2800_register_multiread(rt2x00dev, offset,
+				      &iveiv_entry, sizeof(iveiv_entry));
+
+	memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+	memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+}
+
+static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	u32 reg;
+	bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
+
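+	/*
+	 * RTS/CTS protection is only enabled in the per-mode protection
+	 * registers when the threshold is below the maximum, i.e. when RTS
+	 * is actually in use.
+	 */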
+	rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
+	rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
+	rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
+	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
+	rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
+	rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
+	rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
+	rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
+
+	return 0;
+}
+
+static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+			  const struct ieee80211_tx_queue_params *params)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct data_queue *queue;
+	struct rt2x00_field32 field;
+	int retval;
+	u32 reg;
+	u32 offset;
+
+	/*
+	 * First pass the configuration through rt2x00lib, which will
+	 * update the queue settings and validate the input. After that
+	 * we are free to update the registers based on the values
+	 * in the queue parameters.
+	 */
+	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+	if (retval)
+		return retval;
+
+	/*
+	 * We only need to perform additional register initialization
+	 * for WMM queues.
+	 */
+	if (queue_idx >= 4)
+		return 0;
+
+	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+
+	/* Update WMM TXOP register */
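+	/*
+	 * Two queues share each 32-bit TXOP register: queues 0/1 use
+	 * WMM_TXOP0_CFG and queues 2/3 the next register, with the odd
+	 * queue occupying the upper 16 bits.
+	 */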
+	offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
+	field.bit_offset = (queue_idx & 1) * 16;
+	field.bit_mask = 0xffff << field.bit_offset;
+
+	rt2800_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, field, queue->txop);
+	rt2800_register_write(rt2x00dev, offset, reg);
+
+	/* Update WMM registers */
+	field.bit_offset = queue_idx * 4;
+	field.bit_mask = 0xf << field.bit_offset;
+
+	rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
+	rt2x00_set_field32(&reg, field, queue->aifs);
+	rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_min);
+	rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_max);
+	rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
+
+	/* Update EDCA registers */
+	offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
+
+	rt2800_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
+	rt2800_register_write(rt2x00dev, offset, reg);
+
+	return 0;
+}
+
+static u64 rt2800_get_tsf(struct ieee80211_hw *hw)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	u64 tsf;
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
+	tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
+	rt2800_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
+	tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
+
+	return tsf;
+}
+
+const struct ieee80211_ops rt2800_mac80211_ops = {
+	.tx			= rt2x00mac_tx,
+	.start			= rt2x00mac_start,
+	.stop			= rt2x00mac_stop,
+	.add_interface		= rt2x00mac_add_interface,
+	.remove_interface	= rt2x00mac_remove_interface,
+	.config			= rt2x00mac_config,
+	.configure_filter	= rt2x00mac_configure_filter,
+	.set_tim		= rt2x00mac_set_tim,
+	.set_key		= rt2x00mac_set_key,
+	.get_stats		= rt2x00mac_get_stats,
+	.get_tkip_seq		= rt2800_get_tkip_seq,
+	.set_rts_threshold	= rt2800_set_rts_threshold,
+	.bss_info_changed	= rt2x00mac_bss_info_changed,
+	.conf_tx		= rt2800_conf_tx,
+	.get_tx_stats		= rt2x00mac_get_tx_stats,
+	.get_tsf		= rt2800_get_tsf,
+	.rfkill_poll		= rt2x00mac_rfkill_poll,
+};
+EXPORT_SYMBOL_GPL(rt2800_mac80211_ops);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
new file mode 100644
index 000000000000..535ce22f2ac8
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -0,0 +1,151 @@
+/*
+	Copyright (C) 2009 Bartlomiej Zolnierkiewicz
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef RT2800LIB_H
+#define RT2800LIB_H
+
+struct rt2800_ops {
+	void (*register_read)(struct rt2x00_dev *rt2x00dev,
+			      const unsigned int offset, u32 *value);
+	void (*register_read_lock)(struct rt2x00_dev *rt2x00dev,
+				   const unsigned int offset, u32 *value);
+	void (*register_write)(struct rt2x00_dev *rt2x00dev,
+			       const unsigned int offset, u32 value);
+	void (*register_write_lock)(struct rt2x00_dev *rt2x00dev,
+				    const unsigned int offset, u32 value);
+
+	void (*register_multiread)(struct rt2x00_dev *rt2x00dev,
+				   const unsigned int offset,
+				   void *value, const u32 length);
+	void (*register_multiwrite)(struct rt2x00_dev *rt2x00dev,
+				    const unsigned int offset,
+				    const void *value, const u32 length);
+
+	int (*regbusy_read)(struct rt2x00_dev *rt2x00dev,
+			    const unsigned int offset,
+			    const struct rt2x00_field32 field, u32 *reg);
+};
+
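+/*
+ * The wrappers below dispatch all register accesses through the rt2800_ops
+ * callbacks stored in rt2x00dev->priv, keeping this library code independent
+ * of the bus specific register access implementation.
+ */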
+static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
+					const unsigned int offset,
+					u32 *value)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	rt2800ops->register_read(rt2x00dev, offset, value);
+}
+
+static inline void rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev,
+					     const unsigned int offset,
+					     u32 *value)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	rt2800ops->register_read_lock(rt2x00dev, offset, value);
+}
+
+static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev,
+					 const unsigned int offset,
+					 u32 value)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	rt2800ops->register_write(rt2x00dev, offset, value);
+}
+
+static inline void rt2800_register_write_lock(struct rt2x00_dev *rt2x00dev,
+					      const unsigned int offset,
+					      u32 value)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	rt2800ops->register_write_lock(rt2x00dev, offset, value);
+}
+
+static inline void rt2800_register_multiread(struct rt2x00_dev *rt2x00dev,
+					     const unsigned int offset,
+					     void *value, const u32 length)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	rt2800ops->register_multiread(rt2x00dev, offset, value, length);
+}
+
+static inline void rt2800_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+					      const unsigned int offset,
+					      const void *value,
+					      const u32 length)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	rt2800ops->register_multiwrite(rt2x00dev, offset, value, length);
+}
+
+static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
+				      const unsigned int offset,
+				      const struct rt2x00_field32 field,
+				      u32 *reg)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
+}
+
+void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
+			const u8 command, const u8 token,
+			const u8 arg0, const u8 arg1);
+
+extern const struct rt2x00debug rt2800_rt2x00debug;
+
+int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
+void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+		     struct rt2x00_led *led, enum led_type type);
+int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
+			     struct rt2x00lib_crypto *crypto,
+			     struct ieee80211_key_conf *key);
+int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
+			       struct rt2x00lib_crypto *crypto,
+			       struct ieee80211_key_conf *key);
+void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
+			  const unsigned int filter_flags);
+void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
+			struct rt2x00intf_conf *conf, const unsigned int flags);
+void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp);
+void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant);
+void rt2800_config(struct rt2x00_dev *rt2x00dev,
+		   struct rt2x00lib_conf *libconf,
+		   const unsigned int flags);
+void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
+void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
+void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
+		       const u32 count);
+
+int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
+int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
+int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
+
+int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
+void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
+int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev);
+int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev);
+int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
+
+extern const struct ieee80211_ops rt2800_mac80211_ops;
+
+#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
new file mode 100644
index 000000000000..dfc886fcb44d
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -0,0 +1,1322 @@
+/*
+	Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2800pci
+	Abstract: rt2800pci device specific routines.
+	Supported chipsets: RT2800E & RT2800ED.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/eeprom_93cx6.h>
+
+#include "rt2x00.h"
+#include "rt2x00pci.h"
+#include "rt2x00soc.h"
+#include "rt2800lib.h"
+#include "rt2800.h"
+#include "rt2800pci.h"
+
+#ifdef CONFIG_RT2800PCI_PCI_MODULE
+#define CONFIG_RT2800PCI_PCI
+#endif
+
+#ifdef CONFIG_RT2800PCI_WISOC_MODULE
+#define CONFIG_RT2800PCI_WISOC
+#endif
+
+/*
+ * Allow hardware encryption to be disabled.
+ */
+static int modparam_nohwcrypt = 1;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
+{
+	unsigned int i;
+	u32 reg;
+
+	for (i = 0; i < 200; i++) {
+		rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
+
+		if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
+		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
+		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) ||
+		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token))
+			break;
+
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	if (i == 200)
+		ERROR(rt2x00dev, "MCU request failed, no response from hardware\n");
+
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+}
+
+#ifdef CONFIG_RT2800PCI_WISOC
+static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
+{
+	u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
+
+	memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+}
+#else
+static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
+{
+}
+#endif /* CONFIG_RT2800PCI_WISOC */
+
+#ifdef CONFIG_RT2800PCI_PCI
+static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
+{
+	struct rt2x00_dev *rt2x00dev = eeprom->data;
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+
+	eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
+	eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
+	eeprom->reg_data_clock =
+	    !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK);
+	eeprom->reg_chip_select =
+	    !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT);
+}
+
+static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
+{
+	struct rt2x00_dev *rt2x00dev = eeprom->data;
+	u32 reg = 0;
+
+	rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in);
+	rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out);
+	rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK,
+			   !!eeprom->reg_data_clock);
+	rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
+			   !!eeprom->reg_chip_select);
+
+	rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
+}
+
+static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
+{
+	struct eeprom_93cx6 eeprom;
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+
+	eeprom.data = rt2x00dev;
+	eeprom.register_read = rt2800pci_eepromregister_read;
+	eeprom.register_write = rt2800pci_eepromregister_write;
+	eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
+	    PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
+	eeprom.reg_data_in = 0;
+	eeprom.reg_data_out = 0;
+	eeprom.reg_data_clock = 0;
+	eeprom.reg_chip_select = 0;
+
+	eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom,
+			       EEPROM_SIZE / sizeof(u16));
+}
+
+static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2800_efuse_detect(rt2x00dev);
+}
+
+static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+{
+	rt2800_read_eeprom_efuse(rt2x00dev);
+}
+#else
+static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
+{
+}
+
+static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
+{
+	return 0;
+}
+
+static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+{
+}
+#endif /* CONFIG_RT2800PCI_PCI */
+
+/*
+ * Firmware functions
+ */
+static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+	return FIRMWARE_RT2860;
+}
+
+static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev,
+				    const u8 *data, const size_t len)
+{
+	u16 fw_crc;
+	u16 crc;
+
+	/*
+	 * Only support 8 KiB firmware files.
+	 */
+	if (len != 8192)
+		return FW_BAD_LENGTH;
+
+	/*
+	 * The last 2 bytes in the firmware array are the crc checksum itself;
+	 * this means that we should never pass those 2 bytes to the crc
+	 * algorithm.
+	 */
+	fw_crc = (data[len - 2] << 8 | data[len - 1]);
+
+	/*
+	 * Use the crc ccitt algorithm.
+	 * This will return the same value as the legacy driver which
+	 * used bit ordering reversal on both the firmware bytes
+	 * before input as well as on the final output.
+	 * Obviously using crc ccitt directly is much more efficient.
+	 */
+	crc = crc_ccitt(~0, data, len - 2);
+
+	/*
+	 * There is a small difference between the crc-itu-t + bitrev and
+	 * the crc-ccitt crc calculation. In the latter method the 2 bytes
+	 * are swapped, so use swab16 to convert the crc to the correct
+	 * value.
+	 */
+	crc = swab16(crc);
+
+	return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
+}
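/*
 * Editor's sketch (not part of the patch above): a self-contained,
 * userspace rendering of the CRC check in rt2800pci_check_firmware().
 * The kernel's crc_ccitt() (reflected polynomial 0x8408, LSB first,
 * seeded with ~0) is reimplemented bit by bit, the result is
 * byte-swapped the way swab16() does, and compared against the
 * big-endian checksum stored in the last two bytes of the image.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t sketch_crc_ccitt(uint16_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

/* Returns 1 when an 8 KiB firmware image carries a valid checksum. */
static int sketch_fw_crc_ok(const uint8_t *data, size_t len)
{
	uint16_t fw_crc, crc;

	if (len != 8192)
		return 0;

	fw_crc = (uint16_t)(data[len - 2] << 8 | data[len - 1]);
	crc = sketch_crc_ccitt(0xffff, data, len - 2);
	crc = (uint16_t)((crc >> 8) | (crc << 8));	/* swab16() */

	return fw_crc == crc;
}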
+
+static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
+				   const u8 *data, const size_t len)
+{
+	unsigned int i;
+	u32 reg;
+
+	/*
+	 * Wait for stable hardware.
+	 */
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+		if (reg && reg != ~0)
+			break;
+		msleep(1);
+	}
+
+	if (i == REGISTER_BUSY_COUNT) {
+		ERROR(rt2x00dev, "Unstable hardware.\n");
+		return -EBUSY;
+	}
+
+	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
+	rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
+
+	/*
+	 * Disable DMA; it will be re-enabled later when enabling
+	 * the radio.
+	 */
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	/*
+	 * Enable host program RAM write selection
+	 */
+	reg = 0;
+	rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg);
+
+	/*
+	 * Write firmware to device.
+	 */
+	rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+				      data, len);
+
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);
+
+	/*
+	 * Wait for device to stabilize.
+	 */
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
+			break;
+		msleep(1);
+	}
+
+	if (i == REGISTER_BUSY_COUNT) {
+		ERROR(rt2x00dev, "PBF system register not ready.\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Disable interrupts
+	 */
+	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);
+
+	/*
+	 * Initialize BBP R/W access agent
+	 */
+	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+
+	return 0;
+}
+
+/*
+ * Initialization functions.
+ */
+static bool rt2800pci_get_entry_state(struct queue_entry *entry)
+{
+	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+	u32 word;
+
+	if (entry->queue->qid == QID_RX) {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+	} else {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+	}
+}
+
+static void rt2800pci_clear_entry(struct queue_entry *entry)
+{
+	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	u32 word;
+
+	if (entry->queue->qid == QID_RX) {
+		rt2x00_desc_read(entry_priv->desc, 0, &word);
+		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+		rt2x00_desc_write(entry_priv->desc, 0, word);
+
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+		rt2x00_desc_write(entry_priv->desc, 1, word);
+	} else {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+		rt2x00_desc_write(entry_priv->desc, 1, word);
+	}
+}
+
+static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct queue_entry_priv_pci *entry_priv;
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+	/*
+	 * Initialize registers.
+	 */
+	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+	rt2800_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma);
+	rt2800_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit);
+	rt2800_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+	rt2800_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+	rt2800_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma);
+	rt2800_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit);
+	rt2800_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+	rt2800_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+	rt2800_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma);
+	rt2800_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit);
+	rt2800_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+	rt2800_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+	rt2800_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma);
+	rt2800_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit);
+	rt2800_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+	rt2800_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+	entry_priv = rt2x00dev->rx->entries[0].priv_data;
+	rt2800_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
+	rt2800_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit);
+	rt2800_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1);
+	rt2800_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+	/*
+	 * Enable global DMA configuration
+	 */
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	rt2800_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+	return 0;
+}
+
+/*
+ * Device state switch handlers.
+ */
+static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
+				enum dev_state state)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
+			   (state == STATE_RADIO_RX_ON) ||
+			   (state == STATE_RADIO_RX_ON_LINK));
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+}
+
+static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+				 enum dev_state state)
+{
+	int mask = (state == STATE_RADIO_IRQ_ON);
+	u32 reg;
+
+	/*
+	 * When interrupts are being enabled, the interrupt source
+	 * register should be cleared first to ensure a clean state.
+	 */
+	if (state == STATE_RADIO_IRQ_ON) {
+		rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+	}
+
+	rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask);
+	rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+}
+
+static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u32 reg;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+		if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+		    !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+			return 0;
+
+		msleep(1);
+	}
+
+	ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+	return -EACCES;
+}
+
+static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	u16 word;
+
+	/*
+	 * Initialize all registers.
+	 */
+	if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+		     rt2800pci_init_queues(rt2x00dev) ||
+		     rt2800_init_registers(rt2x00dev) ||
+		     rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+		     rt2800_init_bbp(rt2x00dev) ||
+		     rt2800_init_rfcsr(rt2x00dev)))
+		return -EIO;
+
+	/*
+	 * Send signal to firmware during boot time.
+	 */
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+
+	/*
+	 * Enable RX.
+	 */
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	/*
+	 * Initialize LED control
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
+	rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
+			      word & 0xff, (word >> 8) & 0xff);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
+	rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
+			      word & 0xff, (word >> 8) & 0xff);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
+	rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
+			      word & 0xff, (word >> 8) & 0xff);
+
+	return 0;
+}
+
+static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
+	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+	rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
+
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
+
+	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+	/* Wait for DMA, ignore error */
+	rt2800pci_wait_wpdma_ready(rt2x00dev);
+}
+
+static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
+			       enum dev_state state)
+{
+	/*
+	 * Always put the device to sleep (even when we intend to wake it up);
+	 * if the device is booting and wasn't asleep, it will return
+	 * failure when attempting to wake up.
+	 */
+	rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+
+	if (state == STATE_AWAKE) {
+		rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+		rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+	}
+
+	return 0;
+}
+
+static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
+				      enum dev_state state)
+{
+	int retval = 0;
+
+	switch (state) {
+	case STATE_RADIO_ON:
+		/*
+		 * Before the radio can be enabled, the device first has
+		 * to be woken up. After that it needs a bit of time
+		 * to be fully awake and then the radio can be enabled.
+		 */
+		rt2800pci_set_state(rt2x00dev, STATE_AWAKE);
+		msleep(1);
+		retval = rt2800pci_enable_radio(rt2x00dev);
+		break;
+	case STATE_RADIO_OFF:
+		/*
+		 * After the radio has been disabled, the device should
+		 * be put to sleep for powersaving.
+		 */
+		rt2800pci_disable_radio(rt2x00dev);
+		rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
+		break;
+	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+	case STATE_RADIO_RX_OFF:
+	case STATE_RADIO_RX_OFF_LINK:
+		rt2800pci_toggle_rx(rt2x00dev, state);
+		break;
+	case STATE_RADIO_IRQ_ON:
+	case STATE_RADIO_IRQ_OFF:
+		rt2800pci_toggle_irq(rt2x00dev, state);
+		break;
+	case STATE_DEEP_SLEEP:
+	case STATE_SLEEP:
+	case STATE_STANDBY:
+	case STATE_AWAKE:
+		retval = rt2800pci_set_state(rt2x00dev, state);
+		break;
+	default:
+		retval = -ENOTSUPP;
+		break;
+	}
+
+	if (unlikely(retval))
+		ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
+		      state, retval);
+
+	return retval;
+}
+
+/*
+ * TX descriptor initialization
+ */
+static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
+				    struct sk_buff *skb,
+				    struct txentry_desc *txdesc)
+{
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+	__le32 *txd = skbdesc->desc;
+	__le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom);
+	u32 word;
+
+	/*
+	 * Initialize TX Info descriptor
+	 */
+	rt2x00_desc_read(txwi, 0, &word);
+	rt2x00_set_field32(&word, TXWI_W0_FRAG,
+			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
+	rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
+	rt2x00_set_field32(&word, TXWI_W0_TS,
+			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_AMPDU,
+			   test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
+	rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
+	rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+	rt2x00_set_field32(&word, TXWI_W0_BW,
+			   test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
+			   test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
+	rt2x00_desc_write(txwi, 0, word);
+
+	rt2x00_desc_read(txwi, 1, &word);
+	rt2x00_set_field32(&word, TXWI_W1_ACK,
+			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W1_NSEQ,
+			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
+			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
+			   txdesc->key_idx : 0xff);
+	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
+			   skb->len - txdesc->l2pad);
+	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
+			   skbdesc->entry->queue->qid + 1);
+	rt2x00_desc_write(txwi, 1, word);
+
+	/*
+	 * Always write 0 to IV/EIV fields, hardware will insert the IV
+	 * from the IVEIV register when TXD_W3_WIV is set to 0.
+	 * When TXD_W3_WIV is set to 1 it will use the IV data
+	 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
+	 * crypto entry in the registers should be used to encrypt the frame.
+	 */
+	_rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
+	_rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
+
+	/*
+	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+	 * must together contain a TXWI structure + 802.11 header + padding +
+	 * 802.11 data. We choose to have SD_PTR0/SD_LEN0 contain only the
+	 * TXWI and SD_PTR1/SD_LEN1 contain the 802.11 header + padding +
+	 * 802.11 data. This means that LAST_SEC0 is always 0.
+	 */
+
+	/*
+	 * Initialize TX descriptor
+	 */
+	rt2x00_desc_read(txd, 0, &word);
+	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+	rt2x00_desc_write(txd, 0, word);
+
+	rt2x00_desc_read(txd, 1, &word);
+	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len);
+	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W1_BURST,
+			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
+			   rt2x00dev->ops->extra_tx_headroom);
+	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+	rt2x00_desc_write(txd, 1, word);
+
+	rt2x00_desc_read(txd, 2, &word);
+	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+			   skbdesc->skb_dma + rt2x00dev->ops->extra_tx_headroom);
+	rt2x00_desc_write(txd, 2, word);
+
+	rt2x00_desc_read(txd, 3, &word);
+	rt2x00_set_field32(&word, TXD_W3_WIV,
+			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+	rt2x00_desc_write(txd, 3, word);
+}
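/*
 * Editor's sketch (not part of the patch above), using plain types to
 * spell out the two-segment DMA layout chosen above: segment 0 covers
 * only the TXWI that lives in the extra TX headroom, segment 1 covers
 * the 802.11 header + padding + data that follow it, which is why
 * LAST_SEC0 is always 0.
 */
#include <stdint.h>

struct sketch_tx_segments {
	uint32_t sd_ptr0;	/* DMA address of the TXWI */
	uint32_t sd_len0;	/* TXWI length (extra_tx_headroom) */
	uint32_t sd_ptr1;	/* DMA address of header + padding + data */
	uint32_t sd_len1;	/* remaining frame length (skb->len) */
};

static void sketch_fill_tx_segments(struct sketch_tx_segments *seg,
				    uint32_t skb_dma,
				    uint32_t extra_tx_headroom,
				    uint32_t skb_len)
{
	seg->sd_ptr0 = skb_dma;
	seg->sd_len0 = extra_tx_headroom;
	seg->sd_ptr1 = skb_dma + extra_tx_headroom;
	seg->sd_len1 = skb_len;
}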
+
+/*
+ * TX data initialization
+ */
+static void rt2800pci_write_beacon(struct queue_entry *entry)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	unsigned int beacon_base;
+	u32 reg;
+
+	/*
+	 * Disable beaconing while we are reloading the beacon data;
+	 * otherwise we might be sending out invalid data.
+	 */
+	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+	/*
+	 * Write entire beacon with descriptor to register.
+	 */
+	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+	rt2800_register_multiwrite(rt2x00dev,
+				      beacon_base,
+				      skbdesc->desc, skbdesc->desc_len);
+	rt2800_register_multiwrite(rt2x00dev,
+				      beacon_base + skbdesc->desc_len,
+				      entry->skb->data, entry->skb->len);
+
+	/*
+	 * Clean up beacon skb.
+	 */
+	dev_kfree_skb_any(entry->skb);
+	entry->skb = NULL;
+}
+
+static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
+				    const enum data_queue_qid queue_idx)
+{
+	struct data_queue *queue;
+	unsigned int idx, qidx = 0;
+	u32 reg;
+
+	if (queue_idx == QID_BEACON) {
+		rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+		if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+			rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+		}
+		return;
+	}
+
+	if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
+		return;
+
+	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+	idx = queue->index[Q_INDEX];
+
+	if (queue_idx == QID_MGMT)
+		qidx = 5;
+	else
+		qidx = queue_idx;
+
+	rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx);
+}
+
+static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
+				    const enum data_queue_qid qid)
+{
+	u32 reg;
+
+	if (qid == QID_BEACON) {
+		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
+		return;
+	}
+
+	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE));
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK));
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI));
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO));
+	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+}
+
+/*
+ * RX control handlers
+ */
+static void rt2800pci_fill_rxdone(struct queue_entry *entry,
+				  struct rxdone_entry_desc *rxdesc)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+	__le32 *rxd = entry_priv->desc;
+	__le32 *rxwi = (__le32 *)entry->skb->data;
+	u32 rxd3;
+	u32 rxwi0;
+	u32 rxwi1;
+	u32 rxwi2;
+	u32 rxwi3;
+
+	rt2x00_desc_read(rxd, 3, &rxd3);
+	rt2x00_desc_read(rxwi, 0, &rxwi0);
+	rt2x00_desc_read(rxwi, 1, &rxwi1);
+	rt2x00_desc_read(rxwi, 2, &rxwi2);
+	rt2x00_desc_read(rxwi, 3, &rxwi3);
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
+		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+		/*
+		 * Unfortunately we don't know the cipher type used during
+		 * decryption. This prevents us from providing correct
+		 * statistics through debugfs.
+		 */
+		rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
+		rxdesc->cipher_status =
+		    rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
+	}
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
+		/*
+		 * Hardware has stripped IV/EIV data from 802.11 frame during
+		 * decryption. Unfortunately the descriptor doesn't contain
+		 * any fields with the EIV/IV data either, so they can't
+		 * be restored by rt2x00lib.
+		 */
+		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+			rxdesc->flags |= RX_FLAG_DECRYPTED;
+		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+	}
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
+		rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
+		rxdesc->dev_flags |= RXDONE_L2PAD;
+		skbdesc->flags |= SKBDESC_L2_PADDED;
+	}
+
+	if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
+		rxdesc->flags |= RX_FLAG_SHORT_GI;
+
+	if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
+		rxdesc->flags |= RX_FLAG_40MHZ;
+
+	/*
+	 * Detect RX rate, always use MCS as signal type.
+	 */
+	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+	rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
+	rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
+
+	/*
+	 * Mask off the 0x8 bit to remove the short preamble flag.
+	 */
+	if (rxdesc->rate_mode == RATE_MODE_CCK)
+		rxdesc->signal &= ~0x8;
+
+	rxdesc->rssi =
+	    (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
+	     rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
+
+	rxdesc->noise =
+	    (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
+	     rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
+
+	rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+	/*
+	 * Set RX IDX in register to inform hardware that we have handled
+	 * this entry and it is available for reuse again.
+	 */
+	rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
+
+	/*
+	 * Remove TXWI descriptor from start of buffer.
+	 */
+	skb_pull(entry->skb, RXWI_DESC_SIZE);
+	skb_trim(entry->skb, rxdesc->size);
+}
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+	struct queue_entry *entry;
+	struct queue_entry *entry_done;
+	struct queue_entry_priv_pci *entry_priv;
+	struct txdone_entry_desc txdesc;
+	u32 word;
+	u32 reg;
+	u32 old_reg;
+	unsigned int type;
+	unsigned int index;
+	u16 mcs, real_mcs;
+
+	/*
+	 * During each loop we will compare the freshly read
+	 * TX_STA_FIFO register value with the value read from
+	 * the previous loop. If the 2 values are equal then
+	 * we should stop processing because the chance is
+	 * quite big that the device has been unplugged and
+	 * we risk going into an endless loop.
+	 */
+	old_reg = 0;
+
+	while (1) {
+		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
+		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
+			break;
+
+		if (old_reg == reg)
+			break;
+		old_reg = reg;
+
+		/*
+		 * Skip this entry when it contains an invalid
+		 * queue identification number.
+		 */
+		type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
+		if (type >= QID_RX)
+			continue;
+
+		queue = rt2x00queue_get_queue(rt2x00dev, type);
+		if (unlikely(!queue))
+			continue;
+
+		/*
+		 * Skip this entry when it contains an invalid
+		 * index number.
+		 */
+		index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
+		if (unlikely(index >= queue->limit))
+			continue;
+
+		entry = &queue->entries[index];
+		entry_priv = entry->priv_data;
+		rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);
+
+		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+		while (entry != entry_done) {
+			/*
+			 * Catch up.
+			 * Just report any entries we missed as failed.
+			 */
+			WARNING(rt2x00dev,
+				"TX status report missed for entry %d\n",
+				entry_done->entry_idx);
+
+			txdesc.flags = 0;
+			__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
+			txdesc.retry = 0;
+
+			rt2x00lib_txdone(entry_done, &txdesc);
+			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+		}
+
+		/*
+		 * Obtain the status about this packet.
+		 */
+		txdesc.flags = 0;
+		if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
+			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
+		else
+			__set_bit(TXDONE_FAILURE, &txdesc.flags);
+
+		/*
+		 * Ralink has a retry mechanism using a global fallback
+		 * table. We set up this fallback table to try the
+		 * immediately lower rate for all rates. In the TX_STA_FIFO,
+		 * the MCS field contains the MCS used for the successful
+		 * transmission. If the first transmission succeeds,
+		 * we have mcs == tx_mcs. On the second transmission,
+		 * we have mcs = tx_mcs - 1. So the number of
+		 * retries is (tx_mcs - mcs).
+		 */
+		mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
+		real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
+		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
+		txdesc.retry = mcs - min(mcs, real_mcs);
+
+		rt2x00lib_txdone(entry, &txdesc);
+	}
+}
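/*
 * Editor's sketch (not part of the patch above): the fallback-based
 * retry accounting used above, isolated. The TXWI records the MCS we
 * asked for, TX_STA_FIFO reports the MCS that finally succeeded, and
 * each fallback step counts as one retry; e.g. a requested MCS 7 with
 * a reported MCS 5 means 2 retries, and a report at or above the
 * request yields 0.
 */
static unsigned int sketch_tx_retry_count(unsigned int tx_mcs,
					  unsigned int real_mcs)
{
	unsigned int lowest = (tx_mcs < real_mcs) ? tx_mcs : real_mcs;

	return tx_mcs - lowest;
}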
+
+static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
+{
+	struct rt2x00_dev *rt2x00dev = dev_instance;
+	u32 reg;
+
+	/* Read status and ACK all interrupts */
+	rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+	rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+	if (!reg)
+		return IRQ_NONE;
+
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		return IRQ_HANDLED;
+
+	/*
+	 * 1 - Rx ring done interrupt.
+	 */
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+		rt2x00pci_rxdone(rt2x00dev);
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
+		rt2800pci_txdone(rt2x00dev);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Device probe functions.
+ */
+static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	/*
+	 * Read EEPROM into buffer
+	 */
+	switch (rt2x00dev->chip.rt) {
+	case RT2880:
+	case RT3052:
+		rt2800pci_read_eeprom_soc(rt2x00dev);
+		break;
+	default:
+		if (rt2800pci_efuse_detect(rt2x00dev))
+			rt2800pci_read_eeprom_efuse(rt2x00dev);
+		else
+			rt2800pci_read_eeprom_pci(rt2x00dev);
+		break;
+	}
+
+	return rt2800_validate_eeprom(rt2x00dev);
+}
+
+static const struct rt2800_ops rt2800pci_rt2800_ops = {
+	.register_read		= rt2x00pci_register_read,
+	.register_read_lock	= rt2x00pci_register_read, /* same for PCI */
+	.register_write		= rt2x00pci_register_write,
+	.register_write_lock	= rt2x00pci_register_write, /* same for PCI */
+
+	.register_multiread	= rt2x00pci_register_multiread,
+	.register_multiwrite	= rt2x00pci_register_multiwrite,
+
+	.regbusy_read		= rt2x00pci_regbusy_read,
+};
+
+static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+	int retval;
+
+	rt2x00dev->priv = (void *)&rt2800pci_rt2800_ops;
+
+	/*
+	 * Allocate eeprom data.
+	 */
+	retval = rt2800pci_validate_eeprom(rt2x00dev);
+	if (retval)
+		return retval;
+
+	retval = rt2800_init_eeprom(rt2x00dev);
+	if (retval)
+		return retval;
+
+	/*
+	 * Initialize hw specifications.
+	 */
+	retval = rt2800_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
+
+	/*
+	 * This device has multiple filters for control frames
+	 * and has a separate filter for PS Poll frames.
+	 */
+	__set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags);
+	__set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);
+
+	/*
+	 * This device requires firmware.
+	 */
+	if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
+	    !rt2x00_rt(&rt2x00dev->chip, RT3052))
+		__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
+	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
+	if (!modparam_nohwcrypt)
+		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
+
+	/*
+	 * Set the rssi offset.
+	 */
+	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+	return 0;
+}
+
+static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
+	.irq_handler		= rt2800pci_interrupt,
+	.probe_hw		= rt2800pci_probe_hw,
+	.get_firmware_name	= rt2800pci_get_firmware_name,
+	.check_firmware		= rt2800pci_check_firmware,
+	.load_firmware		= rt2800pci_load_firmware,
+	.initialize		= rt2x00pci_initialize,
+	.uninitialize		= rt2x00pci_uninitialize,
+	.get_entry_state	= rt2800pci_get_entry_state,
+	.clear_entry		= rt2800pci_clear_entry,
+	.set_device_state	= rt2800pci_set_device_state,
+	.rfkill_poll		= rt2800_rfkill_poll,
+	.link_stats		= rt2800_link_stats,
+	.reset_tuner		= rt2800_reset_tuner,
+	.link_tuner		= rt2800_link_tuner,
+	.write_tx_desc		= rt2800pci_write_tx_desc,
+	.write_tx_data		= rt2x00pci_write_tx_data,
+	.write_beacon		= rt2800pci_write_beacon,
+	.kick_tx_queue		= rt2800pci_kick_tx_queue,
+	.kill_tx_queue		= rt2800pci_kill_tx_queue,
+	.fill_rxdone		= rt2800pci_fill_rxdone,
+	.config_shared_key	= rt2800_config_shared_key,
+	.config_pairwise_key	= rt2800_config_pairwise_key,
+	.config_filter		= rt2800_config_filter,
+	.config_intf		= rt2800_config_intf,
+	.config_erp		= rt2800_config_erp,
+	.config_ant		= rt2800_config_ant,
+	.config			= rt2800_config,
+};
+
+static const struct data_queue_desc rt2800pci_queue_rx = {
+	.entry_num		= RX_ENTRIES,
+	.data_size		= AGGREGATION_SIZE,
+	.desc_size		= RXD_DESC_SIZE,
+	.priv_size		= sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2800pci_queue_tx = {
+	.entry_num		= TX_ENTRIES,
+	.data_size		= AGGREGATION_SIZE,
+	.desc_size		= TXD_DESC_SIZE,
+	.priv_size		= sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2800pci_queue_bcn = {
+	.entry_num		= 8 * BEACON_ENTRIES,
+	.data_size		= 0, /* No DMA required for beacons */
+	.desc_size		= TXWI_DESC_SIZE,
+	.priv_size		= sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct rt2x00_ops rt2800pci_ops = {
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 8,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= TXWI_DESC_SIZE,
+	.rx			= &rt2800pci_queue_rx,
+	.tx			= &rt2800pci_queue_tx,
+	.bcn			= &rt2800pci_queue_bcn,
+	.lib			= &rt2800pci_rt2x00_ops,
+	.hw			= &rt2800_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+	.debugfs		= &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+/*
+ * RT2800pci module information.
+ */
+static struct pci_device_id rt2800pci_device_table[] = {
+	{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ 0, }
+};
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
+MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
+#ifdef CONFIG_RT2800PCI_PCI
+MODULE_FIRMWARE(FIRMWARE_RT2860);
+MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
+#endif /* CONFIG_RT2800PCI_PCI */
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_RT2800PCI_WISOC
+#if defined(CONFIG_RALINK_RT288X)
+__rt2x00soc_probe(RT2880, &rt2800pci_ops);
+#elif defined(CONFIG_RALINK_RT305X)
+__rt2x00soc_probe(RT3052, &rt2800pci_ops);
+#endif
+
+static struct platform_driver rt2800soc_driver = {
+	.driver		= {
+		.name		= "rt2800_wmac",
+		.owner		= THIS_MODULE,
+		.mod_name	= KBUILD_MODNAME,
+	},
+	.probe		= __rt2x00soc_probe,
+	.remove		= __devexit_p(rt2x00soc_remove),
+	.suspend	= rt2x00soc_suspend,
+	.resume		= rt2x00soc_resume,
+};
+#endif /* CONFIG_RT2800PCI_WISOC */
+
+#ifdef CONFIG_RT2800PCI_PCI
+static struct pci_driver rt2800pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= rt2800pci_device_table,
+	.probe		= rt2x00pci_probe,
+	.remove		= __devexit_p(rt2x00pci_remove),
+	.suspend	= rt2x00pci_suspend,
+	.resume		= rt2x00pci_resume,
+};
+#endif /* CONFIG_RT2800PCI_PCI */
+
+static int __init rt2800pci_init(void)
+{
+	int ret = 0;
+
+#ifdef CONFIG_RT2800PCI_WISOC
+	ret = platform_driver_register(&rt2800soc_driver);
+	if (ret)
+		return ret;
+#endif
+#ifdef CONFIG_RT2800PCI_PCI
+	ret = pci_register_driver(&rt2800pci_driver);
+	if (ret) {
+#ifdef CONFIG_RT2800PCI_WISOC
+		platform_driver_unregister(&rt2800soc_driver);
+#endif
+		return ret;
+	}
+#endif
+
+	return ret;
+}
+
+static void __exit rt2800pci_exit(void)
+{
+#ifdef CONFIG_RT2800PCI_PCI
+	pci_unregister_driver(&rt2800pci_driver);
+#endif
+#ifdef CONFIG_RT2800PCI_WISOC
+	platform_driver_unregister(&rt2800soc_driver);
+#endif
+}
+
+module_init(rt2800pci_init);
+module_exit(rt2800pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
new file mode 100644
index 000000000000..afc8e7da27cb
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -0,0 +1,159 @@
+/*
+	Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2800pci
+	Abstract: Data structures and registers for the rt2800pci module.
+	Supported chipsets: RT2800E & RT2800ED.
+ */
+
+#ifndef RT2800PCI_H
+#define RT2800PCI_H
+
+/*
+ * PCI registers.
+ */
+
+/*
+ * E2PROM_CSR: EEPROM control register.
+ * RELOAD: Write 1 to reload eeprom content.
+ * TYPE: 0: 93c46, 1:93c66.
+ * LOAD_STATUS: 1:loading, 0:done.
+ */
+#define E2PROM_CSR			0x0004
+#define E2PROM_CSR_DATA_CLOCK		FIELD32(0x00000001)
+#define E2PROM_CSR_CHIP_SELECT		FIELD32(0x00000002)
+#define E2PROM_CSR_DATA_IN		FIELD32(0x00000004)
+#define E2PROM_CSR_DATA_OUT		FIELD32(0x00000008)
+#define E2PROM_CSR_TYPE			FIELD32(0x00000030)
+#define E2PROM_CSR_LOAD_STATUS		FIELD32(0x00000040)
+#define E2PROM_CSR_RELOAD		FIELD32(0x00000080)
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET		0x10
+#define TX_BASE_PTR(__x)		(TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x)			(TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x)			(TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x)			(TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
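/*
 * Editor's sketch (not part of the patch above): how the per-queue
 * macros resolve. Each TX queue owns a 0x10-byte register block, so
 * queue __x is addressed by stepping that far from the queue-0
 * register. The base address below is hypothetical (the real
 * TX_CTX_IDX0 is defined in rt2800.h); with it, TX_CTX_IDX(5), the
 * management queue index written in rt2800pci_kick_tx_queue(), would
 * resolve to 0x1238 + 5 * 0x10 = 0x1288.
 */
#define SKETCH_TX_QUEUE_REG_OFFSET	0x10
#define SKETCH_TX_CTX_IDX0		0x1238	/* hypothetical base address */
#define SKETCH_TX_CTX_IDX(__x) \
	(SKETCH_TX_CTX_IDX0 + ((__x) * SKETCH_TX_QUEUE_REG_OFFSET))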
+
+/*
+ * 8051 firmware image.
+ */
+#define FIRMWARE_RT2860			"rt2860.bin"
+#define FIRMWARE_IMAGE_BASE		0x2000
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE			(4 * sizeof(__le32))
+#define RXD_DESC_SIZE			(4 * sizeof(__le32))
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0			FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1			FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1		FIELD32(0x00004000)
+#define TXD_W1_BURST			FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0			FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0		FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE			FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ *       0:MGMT, 1:HCCA, 2:EDCA
+ */
+#define TXD_W3_WIV			FIELD32(0x01000000)
+#define TXD_W3_QSEL			FIELD32(0x06000000)
+#define TXD_W3_TCO			FIELD32(0x20000000)
+#define TXD_W3_UCO			FIELD32(0x40000000)
+#define TXD_W3_ICO			FIELD32(0x80000000)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0			FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1			FIELD32(0x00003fff)
+#define RXD_W1_SDL0			FIELD32(0x3fff0000)
+#define RXD_W1_LS0			FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE			FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame has been decrypted by the hardware.
+ */
+#define RXD_W3_BA			FIELD32(0x00000001)
+#define RXD_W3_DATA			FIELD32(0x00000002)
+#define RXD_W3_NULLDATA			FIELD32(0x00000004)
+#define RXD_W3_FRAG			FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME		FIELD32(0x00000010)
+#define RXD_W3_MULTICAST		FIELD32(0x00000020)
+#define RXD_W3_BROADCAST		FIELD32(0x00000040)
+#define RXD_W3_MY_BSS			FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR		FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR		FIELD32(0x00000600)
+#define RXD_W3_AMSDU			FIELD32(0x00000800)
+#define RXD_W3_HTC			FIELD32(0x00001000)
+#define RXD_W3_RSSI			FIELD32(0x00002000)
+#define RXD_W3_L2PAD			FIELD32(0x00004000)
+#define RXD_W3_AMPDU			FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED		FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL		FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI		FIELD32(0x00040000)
+
+#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 9fe770f7d7bb..af85d18cdbe7 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1,5 +1,9 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+	Copyright (C) 2009 Axel Kollhofer <rain_maker@root-forum.org>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -34,6 +38,8 @@
 
 #include "rt2x00.h"
 #include "rt2x00usb.h"
+#include "rt2800lib.h"
+#include "rt2800.h"
 #include "rt2800usb.h"
 
 /*
@@ -44,1027 +50,6 @@ module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
 /*
- * Register access.
- * All access to the CSR registers will go through the methods
- * rt2x00usb_register_read and rt2x00usb_register_write.
- * BBP and RF register require indirect register access,
- * and use the CSR registers BBPCSR and RFCSR to achieve this.
- * These indirect registers work with busy bits,
- * and we will try maximal REGISTER_BUSY_COUNT times to access
- * the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
- * the access attempt is considered to have failed,
- * and we will print an error.
- * The _lock versions must be used if you already hold the csr_mutex
- */
-#define WAIT_FOR_BBP(__dev, __reg) \
-	rt2x00usb_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
-#define WAIT_FOR_RFCSR(__dev, __reg) \
-	rt2x00usb_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
-#define WAIT_FOR_RF(__dev, __reg) \
-	rt2x00usb_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
-#define WAIT_FOR_MCU(__dev, __reg) \
-	rt2x00usb_regbusy_read((__dev), H2M_MAILBOX_CSR, \
-			       H2M_MAILBOX_CSR_OWNER, (__reg))
-
-static void rt2800usb_bbp_write(struct rt2x00_dev *rt2x00dev,
-				const unsigned int word, const u8 value)
-{
-	u32 reg;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	/*
-	 * Wait until the BBP becomes available, afterwards we
-	 * can safely write the new data into the register.
-	 */
-	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
-		reg = 0;
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
-
-		rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
-	}
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-}
-
-static void rt2800usb_bbp_read(struct rt2x00_dev *rt2x00dev,
-			       const unsigned int word, u8 *value)
-{
-	u32 reg;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	/*
-	 * Wait until the BBP becomes available, afterwards we
-	 * can safely write the read request into the register.
-	 * After the data has been written, we wait until hardware
-	 * returns the correct value, if at any time the register
-	 * doesn't become available in time, reg will be 0xffffffff
-	 * which means we return 0xff to the caller.
-	 */
-	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
-		reg = 0;
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
-		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
-
-		rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
-
-		WAIT_FOR_BBP(rt2x00dev, &reg);
-	}
-
-	*value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-}
-
-static void rt2800usb_rfcsr_write(struct rt2x00_dev *rt2x00dev,
-				  const unsigned int word, const u8 value)
-{
-	u32 reg;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	/*
-	 * Wait until the RFCSR becomes available, afterwards we
-	 * can safely write the new data into the register.
-	 */
-	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
-		reg = 0;
-		rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
-		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
-		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
-		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
-
-		rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
-	}
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-}
-
-static void rt2800usb_rfcsr_read(struct rt2x00_dev *rt2x00dev,
-				 const unsigned int word, u8 *value)
-{
-	u32 reg;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	/*
-	 * Wait until the RFCSR becomes available, afterwards we
-	 * can safely write the read request into the register.
-	 * After the data has been written, we wait until hardware
-	 * returns the correct value, if at any time the register
-	 * doesn't become available in time, reg will be 0xffffffff
-	 * which means we return 0xff to the caller.
-	 */
-	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
-		reg = 0;
-		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
-		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
-		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
-
-		rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
-
-		WAIT_FOR_RFCSR(rt2x00dev, &reg);
-	}
-
-	*value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-}
-
-static void rt2800usb_rf_write(struct rt2x00_dev *rt2x00dev,
-			       const unsigned int word, const u32 value)
-{
-	u32 reg;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	/*
-	 * Wait until the RF becomes available, afterwards we
-	 * can safely write the new data into the register.
-	 */
-	if (WAIT_FOR_RF(rt2x00dev, &reg)) {
-		reg = 0;
-		rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
-		rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
-		rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
-		rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
-
-		rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
-		rt2x00_rf_write(rt2x00dev, word, value);
-	}
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-}
-
-static void rt2800usb_mcu_request(struct rt2x00_dev *rt2x00dev,
-				  const u8 command, const u8 token,
-				  const u8 arg0, const u8 arg1)
-{
-	u32 reg;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	/*
-	 * Wait until the MCU becomes available, afterwards we
-	 * can safely write the new data into the register.
-	 */
-	if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
-		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
-		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
-		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
-		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
-		rt2x00usb_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);
-
-		reg = 0;
-		rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
-		rt2x00usb_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
-	}
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-}
-
-#ifdef CONFIG_RT2X00_LIB_DEBUGFS
-static const struct rt2x00debug rt2800usb_rt2x00debug = {
-	.owner	= THIS_MODULE,
-	.csr	= {
-		.read		= rt2x00usb_register_read,
-		.write		= rt2x00usb_register_write,
-		.flags		= RT2X00DEBUGFS_OFFSET,
-		.word_base	= CSR_REG_BASE,
-		.word_size	= sizeof(u32),
-		.word_count	= CSR_REG_SIZE / sizeof(u32),
-	},
-	.eeprom	= {
-		.read		= rt2x00_eeprom_read,
-		.write		= rt2x00_eeprom_write,
-		.word_base	= EEPROM_BASE,
-		.word_size	= sizeof(u16),
-		.word_count	= EEPROM_SIZE / sizeof(u16),
-	},
-	.bbp	= {
-		.read		= rt2800usb_bbp_read,
-		.write		= rt2800usb_bbp_write,
-		.word_base	= BBP_BASE,
-		.word_size	= sizeof(u8),
-		.word_count	= BBP_SIZE / sizeof(u8),
-	},
-	.rf	= {
-		.read		= rt2x00_rf_read,
-		.write		= rt2800usb_rf_write,
-		.word_base	= RF_BASE,
-		.word_size	= sizeof(u32),
-		.word_count	= RF_SIZE / sizeof(u32),
-	},
-};
-#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
-
-static int rt2800usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
-{
-	u32 reg;
-
-	rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-	return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
-}
-
-#ifdef CONFIG_RT2X00_LIB_LEDS
-static void rt2800usb_brightness_set(struct led_classdev *led_cdev,
-				     enum led_brightness brightness)
-{
-	struct rt2x00_led *led =
-	    container_of(led_cdev, struct rt2x00_led, led_dev);
-	unsigned int enabled = brightness != LED_OFF;
-	unsigned int bg_mode =
-	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
-	unsigned int polarity =
-		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
-				   EEPROM_FREQ_LED_POLARITY);
-	unsigned int ledmode =
-		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
-				   EEPROM_FREQ_LED_MODE);
-
-	if (led->type == LED_TYPE_RADIO) {
-		rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
-				      enabled ? 0x20 : 0);
-	} else if (led->type == LED_TYPE_ASSOC) {
-		rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
-				      enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
-	} else if (led->type == LED_TYPE_QUALITY) {
-		/*
-		 * The brightness is divided into 6 levels (0 - 5),
-		 * The specs tell us the following levels:
-		 *	0, 1 ,3, 7, 15, 31
-		 * to determine the level in a simple way we can simply
-		 * work with bitshifting:
-		 *	(1 << level) - 1
-		 */
-		rt2800usb_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
-				      (1 << brightness / (LED_FULL / 6)) - 1,
-				      polarity);
-	}
-}
-
-static int rt2800usb_blink_set(struct led_classdev *led_cdev,
-			       unsigned long *delay_on,
-			       unsigned long *delay_off)
-{
-	struct rt2x00_led *led =
-	    container_of(led_cdev, struct rt2x00_led, led_dev);
-	u32 reg;
-
-	rt2x00usb_register_read(led->rt2x00dev, LED_CFG, &reg);
-	rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
-	rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
-	rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
-	rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
-	rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
-	rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
-	rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
-	rt2x00usb_register_write(led->rt2x00dev, LED_CFG, reg);
-
-	return 0;
-}
-
-static void rt2800usb_init_led(struct rt2x00_dev *rt2x00dev,
-			       struct rt2x00_led *led,
-			       enum led_type type)
-{
-	led->rt2x00dev = rt2x00dev;
-	led->type = type;
-	led->led_dev.brightness_set = rt2800usb_brightness_set;
-	led->led_dev.blink_set = rt2800usb_blink_set;
-	led->flags = LED_INITIALIZED;
-}
-#endif /* CONFIG_RT2X00_LIB_LEDS */
-
-/*
- * Configuration handlers.
- */
-static void rt2800usb_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
-				       struct rt2x00lib_crypto *crypto,
-				       struct ieee80211_key_conf *key)
-{
-	struct mac_wcid_entry wcid_entry;
-	struct mac_iveiv_entry iveiv_entry;
-	u32 offset;
-	u32 reg;
-
-	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
-
-	rt2x00usb_register_read(rt2x00dev, offset, &reg);
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
-			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
-			   (crypto->cmd == SET_KEY) * crypto->cipher);
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
-			   (crypto->cmd == SET_KEY) * crypto->bssidx);
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
-	rt2x00usb_register_write(rt2x00dev, offset, reg);
-
-	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
-
-	memset(&iveiv_entry, 0, sizeof(iveiv_entry));
-	if ((crypto->cipher == CIPHER_TKIP) ||
-	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
-	    (crypto->cipher == CIPHER_AES))
-		iveiv_entry.iv[3] |= 0x20;
-	iveiv_entry.iv[3] |= key->keyidx << 6;
-	rt2x00usb_register_multiwrite(rt2x00dev, offset,
-				      &iveiv_entry, sizeof(iveiv_entry));
-
-	offset = MAC_WCID_ENTRY(key->hw_key_idx);
-
-	memset(&wcid_entry, 0, sizeof(wcid_entry));
-	if (crypto->cmd == SET_KEY)
-		memcpy(&wcid_entry, crypto->address, ETH_ALEN);
-	rt2x00usb_register_multiwrite(rt2x00dev, offset,
-				      &wcid_entry, sizeof(wcid_entry));
-}
-
-static int rt2800usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
-				       struct rt2x00lib_crypto *crypto,
-				       struct ieee80211_key_conf *key)
-{
-	struct hw_key_entry key_entry;
-	struct rt2x00_field32 field;
-	int timeout;
-	u32 offset;
-	u32 reg;
-
-	if (crypto->cmd == SET_KEY) {
-		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;
-
-		memcpy(key_entry.key, crypto->key,
-		       sizeof(key_entry.key));
-		memcpy(key_entry.tx_mic, crypto->tx_mic,
-		       sizeof(key_entry.tx_mic));
-		memcpy(key_entry.rx_mic, crypto->rx_mic,
-		       sizeof(key_entry.rx_mic));
-
-		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
-		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
-		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-						    USB_VENDOR_REQUEST_OUT,
-						    offset, &key_entry,
-						    sizeof(key_entry),
-						    timeout);
-	}
-
-	/*
-	 * The cipher types are stored over multiple registers
-	 * starting at SHARED_KEY_MODE_BASE. Each 32-bit word holds
-	 * the cipher types for 2 bssidx (4 keys of 4 bits each).
-	 * Using the individual field defines would cause overhead,
-	 * so just calculate the correct offset and mask here.
-	 */
-	field.bit_offset = 4 * (key->hw_key_idx % 8);
-	field.bit_mask = 0x7 << field.bit_offset;
-
-	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
-
-	rt2x00usb_register_read(rt2x00dev, offset, &reg);
-	rt2x00_set_field32(&reg, field,
-			   (crypto->cmd == SET_KEY) * crypto->cipher);
-	rt2x00usb_register_write(rt2x00dev, offset, reg);
-
-	/*
-	 * Update WCID information
-	 */
-	rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);
-
-	return 0;
-}
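For reference, a minimal sketch of the SHARED_KEY_MODE offset calculation used above (function name hypothetical): each 32-bit word packs eight 4-bit cipher-type slots, so entry i lives in word i / 8 at bit offset (i % 8) * 4.

/* Sketch only: locate the cipher-type field for a given hw_key_idx. */
static void shared_key_mode_field(unsigned int hw_key_idx, unsigned int *word,
				  unsigned int *bit_offset, unsigned int *bit_mask)
{
	*word = hw_key_idx / 8;			/* which SHARED_KEY_MODE register */
	*bit_offset = 4 * (hw_key_idx % 8);	/* position inside that register */
	*bit_mask = 0x7 << *bit_offset;		/* 3-bit cipher type in a 4-bit slot */
}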
-
-static int rt2800usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
-					 struct rt2x00lib_crypto *crypto,
-					 struct ieee80211_key_conf *key)
-{
-	struct hw_key_entry key_entry;
-	int timeout;
-	u32 offset;
-
-	if (crypto->cmd == SET_KEY) {
-		/*
-		 * Only 1 pairwise key is possible per AID, so the AID maps onto
-		 * our hw_key_idx. Make sure the WCID starts _after_ the
-		 * last possible shared key entry.
-		 */
-		if (crypto->aid > (256 - 32))
-			return -ENOSPC;
-
-		key->hw_key_idx = 32 + crypto->aid;
-
-		memcpy(key_entry.key, crypto->key,
-		       sizeof(key_entry.key));
-		memcpy(key_entry.tx_mic, crypto->tx_mic,
-		       sizeof(key_entry.tx_mic));
-		memcpy(key_entry.rx_mic, crypto->rx_mic,
-		       sizeof(key_entry.rx_mic));
-
-		offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
-		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
-		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-						    USB_VENDOR_REQUEST_OUT,
-						    offset, &key_entry,
-						    sizeof(key_entry),
-						    timeout);
-	}
-
-	/*
-	 * Update WCID information
-	 */
-	rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);
-
-	return 0;
-}
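A minimal sketch of the key-index layout the function above relies on (helper name hypothetical): indices 0-31 are reserved for shared keys (4 per BSS), pairwise keys start at 32 + AID, and the WCID table holds 256 entries in total.

/* Sketch only: pick a hardware key index for a pairwise key, or fail. */
static int pairwise_hw_key_idx(unsigned int aid)
{
	if (aid > 256 - 32)
		return -1;	/* would run past the end of the WCID table */
	return 32 + aid;
}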
-
-static void rt2800usb_config_filter(struct rt2x00_dev *rt2x00dev,
-				    const unsigned int filter_flags)
-{
-	u32 reg;
-
-	/*
-	 * Start configuration steps.
-	 * Note that frames with a version error will always be dropped
-	 * and broadcast frames will always be accepted since
-	 * there is no filter for them at this time.
-	 */
-	rt2x00usb_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
-			   !(filter_flags & FIF_FCSFAIL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
-			   !(filter_flags & FIF_PLCPFAIL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
-			   !(filter_flags & FIF_PROMISC_IN_BSS));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
-			   !(filter_flags & FIF_ALLMULTI));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
-			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
-			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
-			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
-			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
-			   !(filter_flags & FIF_CONTROL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
-			   !(filter_flags & FIF_PSPOLL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
-			   !(filter_flags & FIF_CONTROL));
-	rt2x00usb_register_write(rt2x00dev, RX_FILTER_CFG, reg);
-}
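The pattern above inverts mac80211's "pass this class of frames" semantics into the hardware's "drop" bits; a minimal sketch (helper name hypothetical):

/* Sketch only: a frame class is dropped exactly when its FIF_* flag is not set. */
static unsigned int drop_unless(unsigned int filter_flags, unsigned int fif_flag)
{
	return !(filter_flags & fif_flag);	/* 1 = drop, 0 = accept */
}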
-
-static void rt2800usb_config_intf(struct rt2x00_dev *rt2x00dev,
-				  struct rt2x00_intf *intf,
-				  struct rt2x00intf_conf *conf,
-				  const unsigned int flags)
-{
-	unsigned int beacon_base;
-	u32 reg;
-
-	if (flags & CONFIG_UPDATE_TYPE) {
-		/*
-		 * Clear current synchronisation setup.
-		 * For the Beacon base registers we only need to clear
-		 * the first byte since that byte contains the VALID and OWNER
-		 * bits which (when set to 0) will invalidate the entire beacon.
-		 */
-		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
-		rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
-
-		/*
-		 * Enable synchronisation.
-		 */
-		rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
-		rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-	}
-
-	if (flags & CONFIG_UPDATE_MAC) {
-		reg = le32_to_cpu(conf->mac[1]);
-		rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
-		conf->mac[1] = cpu_to_le32(reg);
-
-		rt2x00usb_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
-					      conf->mac, sizeof(conf->mac));
-	}
-
-	if (flags & CONFIG_UPDATE_BSSID) {
-		reg = le32_to_cpu(conf->bssid[1]);
-		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
-		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
-		conf->bssid[1] = cpu_to_le32(reg);
-
-		rt2x00usb_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
-					      conf->bssid, sizeof(conf->bssid));
-	}
-}
-
-static void rt2800usb_config_erp(struct rt2x00_dev *rt2x00dev,
-				 struct rt2x00lib_erp *erp)
-{
-	u32 reg;
-
-	rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
-	rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
-			   !!erp->short_preamble);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
-			   !!erp->short_preamble);
-	rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
-			   erp->cts_protection ? 2 : 0);
-	rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
-
-	rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE,
-				 erp->basic_rates);
-	rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
-
-	rt2x00usb_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
-	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
-	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
-	rt2x00usb_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
-	rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
-	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
-	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
-	rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
-	rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
-	rt2x00usb_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
-			   erp->beacon_int * 16);
-	rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-}
-
-static void rt2800usb_config_ant(struct rt2x00_dev *rt2x00dev,
-				 struct antenna_setup *ant)
-{
-	u8 r1;
-	u8 r3;
-
-	rt2800usb_bbp_read(rt2x00dev, 1, &r1);
-	rt2800usb_bbp_read(rt2x00dev, 3, &r3);
-
-	/*
-	 * Configure the TX antenna.
-	 */
-	switch ((int)ant->tx) {
-	case 1:
-		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
-		break;
-	case 2:
-		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
-		break;
-	case 3:
-		/* Do nothing */
-		break;
-	}
-
-	/*
-	 * Configure the RX antenna.
-	 */
-	switch ((int)ant->rx) {
-	case 1:
-		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
-		break;
-	case 2:
-		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
-		break;
-	case 3:
-		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
-		break;
-	}
-
-	rt2800usb_bbp_write(rt2x00dev, 3, r3);
-	rt2800usb_bbp_write(rt2x00dev, 1, r1);
-}
-
-static void rt2800usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
-				      struct rt2x00lib_conf *libconf)
-{
-	u16 eeprom;
-	short lna_gain;
-
-	if (libconf->rf.channel <= 14) {
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
-		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
-	} else if (libconf->rf.channel <= 64) {
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
-		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
-	} else if (libconf->rf.channel <= 128) {
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
-		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
-	} else {
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
-		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
-	}
-
-	rt2x00dev->lna_gain = lna_gain;
-}
-
-static void rt2800usb_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
-					  struct ieee80211_conf *conf,
-					  struct rf_channel *rf,
-					  struct channel_info *info)
-{
-	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
-
-	if (rt2x00dev->default_ant.tx == 1)
-		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
-
-	if (rt2x00dev->default_ant.rx == 1) {
-		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
-		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
-	} else if (rt2x00dev->default_ant.rx == 2)
-		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
-
-	if (rf->channel > 14) {
-		/*
-		 * When TX power is below 0, we should increase it by 7 to
-		 * make it a positive value (minimum value is -7).
-		 * However, this means that values between 0 and 7 have a
-		 * double meaning, and we should set a 7dBm boost flag.
-		 */
-		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
-				   (info->tx_power1 >= 0));
-
-		if (info->tx_power1 < 0)
-			info->tx_power1 += 7;
-
-		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
-				   TXPOWER_A_TO_DEV(info->tx_power1));
-
-		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
-				   (info->tx_power2 >= 0));
-
-		if (info->tx_power2 < 0)
-			info->tx_power2 += 7;
-
-		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
-				   TXPOWER_A_TO_DEV(info->tx_power2));
-	} else {
-		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
-				   TXPOWER_G_TO_DEV(info->tx_power1));
-		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
-				   TXPOWER_G_TO_DEV(info->tx_power2));
-	}
-
-	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
-
-	rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
-	rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
-	rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
-	rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
-
-	udelay(200);
-
-	rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
-	rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
-	rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
-	rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
-
-	udelay(200);
-
-	rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
-	rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
-	rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
-	rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
-}
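A standalone sketch of the 5 GHz TX power encoding described in the comment inside the function above (helper name hypothetical; the TXPOWER_A_TO_DEV conversion is left out):

/* Sketch only: split a TX power value (minimum -7 dBm) into a register value
 * plus a 7dBm boost flag. */
static void encode_txpower_a(int tx_power, unsigned int *value, unsigned int *boost)
{
	*boost = (tx_power >= 0);	/* disambiguates the overlapping 0..7 range */
	if (tx_power < 0)
		tx_power += 7;		/* shift -7..-1 into 0..6 */
	*value = (unsigned int)tx_power;
}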
-
-static void rt2800usb_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
-					  struct ieee80211_conf *conf,
-					  struct rf_channel *rf,
-					  struct channel_info *info)
-{
-	u8 rfcsr;
-
-	rt2800usb_rfcsr_write(rt2x00dev, 2, rf->rf1);
-	rt2800usb_rfcsr_write(rt2x00dev, 3, rf->rf3);
-
-	rt2800usb_rfcsr_read(rt2x00dev, 6, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
-	rt2800usb_rfcsr_write(rt2x00dev, 6, rfcsr);
-
-	rt2800usb_rfcsr_read(rt2x00dev, 12, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
-			  TXPOWER_G_TO_DEV(info->tx_power1));
-	rt2800usb_rfcsr_write(rt2x00dev, 12, rfcsr);
-
-	rt2800usb_rfcsr_read(rt2x00dev, 23, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
-	rt2800usb_rfcsr_write(rt2x00dev, 23, rfcsr);
-
-	rt2800usb_rfcsr_write(rt2x00dev, 24,
-			      rt2x00dev->calibration[conf_is_ht40(conf)]);
-
-	rt2800usb_rfcsr_read(rt2x00dev, 7, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
-	rt2800usb_rfcsr_write(rt2x00dev, 7, rfcsr);
-}
-
-static void rt2800usb_config_channel(struct rt2x00_dev *rt2x00dev,
-				     struct ieee80211_conf *conf,
-				     struct rf_channel *rf,
-				     struct channel_info *info)
-{
-	u32 reg;
-	unsigned int tx_pin;
-	u8 bbp;
-
-	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
-		rt2800usb_config_channel_rt2x(rt2x00dev, conf, rf, info);
-	else
-		rt2800usb_config_channel_rt3x(rt2x00dev, conf, rf, info);
-
-	/*
-	 * Change BBP settings
-	 */
-	rt2800usb_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
-	rt2800usb_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
-	rt2800usb_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
-	rt2800usb_bbp_write(rt2x00dev, 86, 0);
-
-	if (rf->channel <= 14) {
-		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
-			rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
-			rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
-		} else {
-			rt2800usb_bbp_write(rt2x00dev, 82, 0x84);
-			rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
-		}
-	} else {
-		rt2800usb_bbp_write(rt2x00dev, 82, 0xf2);
-
-		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
-			rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
-		else
-			rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
-	}
-
-	rt2x00usb_register_read(rt2x00dev, TX_BAND_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
-	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
-	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
-	rt2x00usb_register_write(rt2x00dev, TX_BAND_CFG, reg);
-
-	tx_pin = 0;
-
-	/* Turn on unused PA or LNA when not using 1T or 1R */
-	if (rt2x00dev->default_ant.tx != 1) {
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
-	}
-
-	/* Turn on unused PA or LNA when not using 1T or 1R */
-	if (rt2x00dev->default_ant.rx != 1) {
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
-	}
-
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
-
-	rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
-
-	rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
-	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
-	rt2800usb_bbp_write(rt2x00dev, 4, bbp);
-
-	rt2800usb_bbp_read(rt2x00dev, 3, &bbp);
-	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
-	rt2800usb_bbp_write(rt2x00dev, 3, bbp);
-
-	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
-		if (conf_is_ht40(conf)) {
-			rt2800usb_bbp_write(rt2x00dev, 69, 0x1a);
-			rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
-			rt2800usb_bbp_write(rt2x00dev, 73, 0x16);
-		} else {
-			rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
-			rt2800usb_bbp_write(rt2x00dev, 70, 0x08);
-			rt2800usb_bbp_write(rt2x00dev, 73, 0x11);
-		}
-	}
-
-	msleep(1);
-}
-
-static void rt2800usb_config_txpower(struct rt2x00_dev *rt2x00dev,
-				     const int txpower)
-{
-	u32 reg;
-	u32 value = TXPOWER_G_TO_DEV(txpower);
-	u8 r1;
-
-	rt2800usb_bbp_read(rt2x00dev, 1, &r1);
-	rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
-	rt2800usb_bbp_write(rt2x00dev, 1, r1);
-
-	rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
-	rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
-
-	rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
-	rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
-
-	rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
-	rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
-
-	rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
-	rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
-
-	rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
-	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
-	rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
-}
-
-static void rt2800usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
-					 struct rt2x00lib_conf *libconf)
-{
-	u32 reg;
-
-	rt2x00usb_register_read(rt2x00dev, TX_RTY_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
-			   libconf->conf->short_frame_max_tx_count);
-	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
-			   libconf->conf->long_frame_max_tx_count);
-	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
-	rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
-	rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
-	rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
-	rt2x00usb_register_write(rt2x00dev, TX_RTY_CFG, reg);
-}
-
-static void rt2800usb_config_ps(struct rt2x00_dev *rt2x00dev,
-				struct rt2x00lib_conf *libconf)
-{
-	enum dev_state state =
-	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
-		STATE_SLEEP : STATE_AWAKE;
-	u32 reg;
-
-	if (state == STATE_SLEEP) {
-		rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);
-
-		rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
-		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
-		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
-				   libconf->conf->listen_interval - 1);
-		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
-		rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
-
-		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-	} else {
-		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-
-		rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
-		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
-		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
-		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
-		rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
-	}
-}
-
-static void rt2800usb_config(struct rt2x00_dev *rt2x00dev,
-			     struct rt2x00lib_conf *libconf,
-			     const unsigned int flags)
-{
-	/* Always recalculate LNA gain before changing configuration */
-	rt2800usb_config_lna_gain(rt2x00dev, libconf);
-
-	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
-		rt2800usb_config_channel(rt2x00dev, libconf->conf,
-					 &libconf->rf, &libconf->channel);
-	if (flags & IEEE80211_CONF_CHANGE_POWER)
-		rt2800usb_config_txpower(rt2x00dev, libconf->conf->power_level);
-	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
-		rt2800usb_config_retry_limit(rt2x00dev, libconf);
-	if (flags & IEEE80211_CONF_CHANGE_PS)
-		rt2800usb_config_ps(rt2x00dev, libconf);
-}
-
-/*
- * Link tuning
- */
-static void rt2800usb_link_stats(struct rt2x00_dev *rt2x00dev,
-				 struct link_qual *qual)
-{
-	u32 reg;
-
-	/*
-	 * Update FCS error count from register.
-	 */
-	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
-	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
-}
-
-static u8 rt2800usb_get_default_vgc(struct rt2x00_dev *rt2x00dev)
-{
-	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
-		if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
-			return 0x1c + (2 * rt2x00dev->lna_gain);
-		else
-			return 0x2e + rt2x00dev->lna_gain;
-	}
-
-	if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
-		return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
-	else
-		return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
-}
-
-static inline void rt2800usb_set_vgc(struct rt2x00_dev *rt2x00dev,
-				     struct link_qual *qual, u8 vgc_level)
-{
-	if (qual->vgc_level != vgc_level) {
-		rt2800usb_bbp_write(rt2x00dev, 66, vgc_level);
-		qual->vgc_level = vgc_level;
-		qual->vgc_level_reg = vgc_level;
-	}
-}
-
-static void rt2800usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
-				  struct link_qual *qual)
-{
-	rt2800usb_set_vgc(rt2x00dev, qual,
-			  rt2800usb_get_default_vgc(rt2x00dev));
-}
-
-static void rt2800usb_link_tuner(struct rt2x00_dev *rt2x00dev,
-				 struct link_qual *qual, const u32 count)
-{
-	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
-		return;
-
-	/*
-	 * When RSSI is better than -80, increase the VGC level by 0x10.
-	 */
-	rt2800usb_set_vgc(rt2x00dev, qual,
-			  rt2800usb_get_default_vgc(rt2x00dev) +
-			  ((qual->rssi > -80) * 0x10));
-}
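The adjustment above reduces to a small helper (sketch only, hypothetical name):

/* Sketch only: bump the VGC by 0x10 once the RSSI is better than -80 dBm. */
static unsigned int tuned_vgc(unsigned int default_vgc, int rssi)
{
	return default_vgc + ((rssi > -80) ? 0x10 : 0);
}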
-
-/*
  * Firmware functions
  */
 static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1172,7 +157,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
 	 * Wait for stable hardware.
 	 */
 	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-		rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
 		if (reg && reg != ~0)
 			break;
 		msleep(1);
@@ -1192,8 +177,8 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
 					    data + offset, length,
 					    REGISTER_TIMEOUT32(length));
 
-	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
-	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
 
 	/*
 	 * Send firmware request to device to load firmware,
@@ -1208,18 +193,18 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
 	}
 
 	msleep(10);
-	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
 
 	/*
 	 * Send signal to firmware during boot time.
 	 */
-	rt2800usb_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
 
 	if ((chipset == 0x3070) ||
 	    (chipset == 0x3071) ||
 	    (chipset == 0x3572)) {
 		udelay(200);
-		rt2800usb_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
+		rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
 		udelay(10);
 	}
 
@@ -1227,7 +212,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
 	 * Wait for device to stabilize.
 	 */
 	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-		rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
 		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
 			break;
 		msleep(1);
@@ -1241,536 +226,14 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Initialize firmware.
 	 */
-	rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
-	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
 	msleep(1);
 
 	return 0;
 }
 
 /*
- * Initialization functions.
- */
-static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
-{
-	u32 reg;
-	unsigned int i;
-
-	/*
-	 * Wait until BBP and RF are ready.
-	 */
-	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-		rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
-		if (reg && reg != ~0)
-			break;
-		msleep(1);
-	}
-
-	if (i == REGISTER_BUSY_COUNT) {
-		ERROR(rt2x00dev, "Unstable hardware.\n");
-		return -EBUSY;
-	}
-
-	rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
-	rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
-
-	rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
-	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
-	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
-	rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-
-	rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
-				    USB_MODE_RESET, REGISTER_TIMEOUT);
-
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
-	rt2x00usb_register_read(rt2x00dev, BCN_OFFSET0, &reg);
-	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
-	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
-	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
-	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
-	rt2x00usb_register_write(rt2x00dev, BCN_OFFSET0, reg);
-
-	rt2x00usb_register_read(rt2x00dev, BCN_OFFSET1, &reg);
-	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
-	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
-	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
-	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
-	rt2x00usb_register_write(rt2x00dev, BCN_OFFSET1, reg);
-
-	rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
-	rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
-
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
-	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
-	rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-	if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
-		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
-		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
-		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
-	} else {
-		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
-		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
-	}
-
-	rt2x00usb_register_read(rt2x00dev, TX_LINK_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
-	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
-	rt2x00usb_register_write(rt2x00dev, TX_LINK_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
-	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
-	rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
-	if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
-	    rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
-		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
-	else
-		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
-	rt2x00usb_register_write(rt2x00dev, MAX_LEN_CFG, reg);
-
-	rt2x00usb_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
-
-	rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
-	rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
-	rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
-	rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
-	rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
-	rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
-	rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
-	rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
-	rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
-
-	rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
-
-	rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
-	rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
-
-	rt2x00usb_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
-	rt2x00usb_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
-
-	rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
-	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
-			   IEEE80211_MAX_RTS_THRESHOLD);
-	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
-	rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
-
-	rt2x00usb_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
-	rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
-	/*
-	 * The ASIC will keep garbage values after boot, so clear the encryption keys.
-	 */
-	for (i = 0; i < 4; i++)
-		rt2x00usb_register_write(rt2x00dev,
-					 SHARED_KEY_MODE_ENTRY(i), 0);
-
-	for (i = 0; i < 256; i++) {
-		u32 wcid[2] = { 0xffffffff, 0x00ffffff };
-		rt2x00usb_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
-					      wcid, sizeof(wcid));
-
-		rt2x00usb_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
-		rt2x00usb_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
-	}
-
-	/*
-	 * Clear all beacons
-	 * For the Beacon base registers we only need to clear
-	 * the first byte since that byte contains the VALID and OWNER
-	 * bits which (when set to 0) will invalidate the entire beacon.
-	 */
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
-	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
-
-	rt2x00usb_register_read(rt2x00dev, USB_CYC_CFG, &reg);
-	rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
-	rt2x00usb_register_write(rt2x00dev, USB_CYC_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
-	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
-	rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG0, reg);
-
-	rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
-	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
-	rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG1, reg);
-
-	rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
-	rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG0, reg);
-
-	rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
-	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
-	rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG1, reg);
-
-	/*
-	 * We must clear the error counters.
-	 * These registers are cleared on read,
-	 * so we may read them into a scratch variable and discard the value.
-	 */
-	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
-	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT1, &reg);
-	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT2, &reg);
-	rt2x00usb_register_read(rt2x00dev, TX_STA_CNT0, &reg);
-	rt2x00usb_register_read(rt2x00dev, TX_STA_CNT1, &reg);
-	rt2x00usb_register_read(rt2x00dev, TX_STA_CNT2, &reg);
-
-	return 0;
-}
-
-static int rt2800usb_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
-{
-	unsigned int i;
-	u32 reg;
-
-	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-		rt2x00usb_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
-		if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
-			return 0;
-
-		udelay(REGISTER_BUSY_DELAY);
-	}
-
-	ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
-	return -EACCES;
-}
-
-static int rt2800usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
-{
-	unsigned int i;
-	u8 value;
-
-	/*
-	 * BBP was enabled after firmware was loaded,
-	 * but we need to reactivate it now.
-	 */
-	rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
-	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
-	msleep(1);
-
-	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-		rt2800usb_bbp_read(rt2x00dev, 0, &value);
-		if ((value != 0xff) && (value != 0x00))
-			return 0;
-		udelay(REGISTER_BUSY_DELAY);
-	}
-
-	ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
-	return -EACCES;
-}
-
-static int rt2800usb_init_bbp(struct rt2x00_dev *rt2x00dev)
-{
-	unsigned int i;
-	u16 eeprom;
-	u8 reg_id;
-	u8 value;
-
-	if (unlikely(rt2800usb_wait_bbp_rf_ready(rt2x00dev) ||
-		     rt2800usb_wait_bbp_ready(rt2x00dev)))
-		return -EACCES;
-
-	rt2800usb_bbp_write(rt2x00dev, 65, 0x2c);
-	rt2800usb_bbp_write(rt2x00dev, 66, 0x38);
-	rt2800usb_bbp_write(rt2x00dev, 69, 0x12);
-	rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
-	rt2800usb_bbp_write(rt2x00dev, 73, 0x10);
-	rt2800usb_bbp_write(rt2x00dev, 81, 0x37);
-	rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
-	rt2800usb_bbp_write(rt2x00dev, 83, 0x6a);
-	rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
-	rt2800usb_bbp_write(rt2x00dev, 86, 0x00);
-	rt2800usb_bbp_write(rt2x00dev, 91, 0x04);
-	rt2800usb_bbp_write(rt2x00dev, 92, 0x00);
-	rt2800usb_bbp_write(rt2x00dev, 103, 0x00);
-	rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
-
-	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
-		rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
-		rt2800usb_bbp_write(rt2x00dev, 73, 0x12);
-	}
-
-	if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) {
-		rt2800usb_bbp_write(rt2x00dev, 84, 0x19);
-	}
-
-	if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
-		rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
-		rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
-		rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
-	}
-
-	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
-
-		if (eeprom != 0xffff && eeprom != 0x0000) {
-			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
-			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
-			rt2800usb_bbp_write(rt2x00dev, reg_id, value);
-		}
-	}
-
-	return 0;
-}
-
-static u8 rt2800usb_init_rx_filter(struct rt2x00_dev *rt2x00dev,
-				   bool bw40, u8 rfcsr24, u8 filter_target)
-{
-	unsigned int i;
-	u8 bbp;
-	u8 rfcsr;
-	u8 passband;
-	u8 stopband;
-	u8 overtuned = 0;
-
-	rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
-
-	rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
-	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
-	rt2800usb_bbp_write(rt2x00dev, 4, bbp);
-
-	rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
-	rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);
-
-	/*
-	 * Set power & frequency of passband test tone
-	 */
-	rt2800usb_bbp_write(rt2x00dev, 24, 0);
-
-	for (i = 0; i < 100; i++) {
-		rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
-		msleep(1);
-
-		rt2800usb_bbp_read(rt2x00dev, 55, &passband);
-		if (passband)
-			break;
-	}
-
-	/*
-	 * Set power & frequency of stopband test tone
-	 */
-	rt2800usb_bbp_write(rt2x00dev, 24, 0x06);
-
-	for (i = 0; i < 100; i++) {
-		rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
-		msleep(1);
-
-		rt2800usb_bbp_read(rt2x00dev, 55, &stopband);
-
-		if ((passband - stopband) <= filter_target) {
-			rfcsr24++;
-			overtuned += ((passband - stopband) == filter_target);
-		} else
-			break;
-
-		rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
-	}
-
-	rfcsr24 -= !!overtuned;
-
-	rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
-	return rfcsr24;
-}
-
-static int rt2800usb_init_rfcsr(struct rt2x00_dev *rt2x00dev)
-{
-	u8 rfcsr;
-	u8 bbp;
-
-	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
-		return 0;
-
-	/*
-	 * Init RF calibration.
-	 */
-	rt2800usb_rfcsr_read(rt2x00dev, 30, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
-	rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
-	msleep(1);
-	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
-	rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
-
-	rt2800usb_rfcsr_write(rt2x00dev, 4, 0x40);
-	rt2800usb_rfcsr_write(rt2x00dev, 5, 0x03);
-	rt2800usb_rfcsr_write(rt2x00dev, 6, 0x02);
-	rt2800usb_rfcsr_write(rt2x00dev, 7, 0x70);
-	rt2800usb_rfcsr_write(rt2x00dev, 9, 0x0f);
-	rt2800usb_rfcsr_write(rt2x00dev, 10, 0x71);
-	rt2800usb_rfcsr_write(rt2x00dev, 11, 0x21);
-	rt2800usb_rfcsr_write(rt2x00dev, 12, 0x7b);
-	rt2800usb_rfcsr_write(rt2x00dev, 14, 0x90);
-	rt2800usb_rfcsr_write(rt2x00dev, 15, 0x58);
-	rt2800usb_rfcsr_write(rt2x00dev, 16, 0xb3);
-	rt2800usb_rfcsr_write(rt2x00dev, 17, 0x92);
-	rt2800usb_rfcsr_write(rt2x00dev, 18, 0x2c);
-	rt2800usb_rfcsr_write(rt2x00dev, 19, 0x02);
-	rt2800usb_rfcsr_write(rt2x00dev, 20, 0xba);
-	rt2800usb_rfcsr_write(rt2x00dev, 21, 0xdb);
-	rt2800usb_rfcsr_write(rt2x00dev, 24, 0x16);
-	rt2800usb_rfcsr_write(rt2x00dev, 25, 0x01);
-	rt2800usb_rfcsr_write(rt2x00dev, 27, 0x03);
-	rt2800usb_rfcsr_write(rt2x00dev, 29, 0x1f);
-
-	/*
-	 * Set RX Filter calibration for 20MHz and 40MHz
-	 */
-	rt2x00dev->calibration[0] =
-	    rt2800usb_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
-	rt2x00dev->calibration[1] =
-	    rt2800usb_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
-
-	/*
-	 * Set back to initial state
-	 */
-	rt2800usb_bbp_write(rt2x00dev, 24, 0);
-
-	rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
-	rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);
-
-	/*
-	 * set BBP back to BW20
-	 */
-	rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
-	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
-	rt2800usb_bbp_write(rt2x00dev, 4, bbp);
-
-	return 0;
-}
-
-/*
  * Device state switch handlers.
  */
 static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
@@ -1778,11 +241,11 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
 {
 	u32 reg;
 
-	rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
 	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
 			   (state == STATE_RADIO_RX_ON) ||
 			   (state == STATE_RADIO_RX_ON_LINK));
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
 }
 
 static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
@@ -1791,7 +254,7 @@ static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
 	u32 reg;
 
 	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-		rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+		rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
 		if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
 		    !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
 			return 0;
@@ -1812,25 +275,25 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
 	 * Initialize all registers.
 	 */
 	if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
-		     rt2800usb_init_registers(rt2x00dev) ||
-		     rt2800usb_init_bbp(rt2x00dev) ||
-		     rt2800usb_init_rfcsr(rt2x00dev)))
+		     rt2800_init_registers(rt2x00dev) ||
+		     rt2800_init_bbp(rt2x00dev) ||
+		     rt2800_init_rfcsr(rt2x00dev)))
 		return -EIO;
 
-	rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
 	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
 
 	udelay(50);
 
-	rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
-	rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
 
 
-	rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg);
+	rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
 	rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
 	/* Don't use bulk in aggregation when working with USB 1.1 */
 	rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
@@ -1844,26 +307,26 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
 			   ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3);
 	rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
 	rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
-	rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg);
+	rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
 
-	rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
 	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
 	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
 
 	/*
 	 * Initialize LED control
 	 */
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
-	rt2800usb_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
+	rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
 			      word & 0xff, (word >> 8) & 0xff);
 
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
-	rt2800usb_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
+	rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
 			      word & 0xff, (word >> 8) & 0xff);
 
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
-	rt2800usb_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
+	rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
 			      word & 0xff, (word >> 8) & 0xff);
 
 	return 0;
@@ -1873,14 +336,14 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
 	u32 reg;
 
-	rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-	rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
 
-	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
-	rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0);
-	rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, 0);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
+	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+	rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
 
 	/* Wait for DMA, ignore error */
 	rt2800usb_wait_wpdma_ready(rt2x00dev);
@@ -1892,9 +355,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
 			       enum dev_state state)
 {
 	if (state == STATE_AWAKE)
-		rt2800usb_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
+		rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
 	else
-		rt2800usb_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+		rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
 
 	return 0;
 }
@@ -2048,9 +511,9 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
 	 * Disable beaconing while we are reloading the beacon data,
 	 * otherwise we might be sending out invalid data.
 	 */
-	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
 	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
-	rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
 	/*
 	 * Write entire beacon with descriptor to register.
@@ -2093,12 +556,12 @@ static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
 		return;
 	}
 
-	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
 	if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
 		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
 		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
 		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-		rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 	}
 }
 
@@ -2124,7 +587,7 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 	 */
 	memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
 	rxd = (__le32 *)skbdesc->desc;
-	rxwi = &rxd[RXD_DESC_SIZE / sizeof(__le32)];
+	rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
 
 	/*
 	 * It is now safe to read the descriptor on all architectures.
@@ -2135,16 +598,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 	rt2x00_desc_read(rxwi, 2, &rxwi2);
 	rt2x00_desc_read(rxwi, 3, &rxwi3);
 
-	if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
+	if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
 		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
 
 	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
 		rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
 		rxdesc->cipher_status =
-		    rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
+		    rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
 	}
 
-	if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
+	if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
 		/*
 		 * Hardware has stripped IV/EIV data from 802.11 frame during
 		 * decryption. Unfortunately the descriptor doesn't contain
@@ -2159,10 +622,10 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
 	}
 
-	if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
+	if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
 		rxdesc->dev_flags |= RXDONE_MY_BSS;
 
-	if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD)) {
+	if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
 		rxdesc->dev_flags |= RXDONE_L2PAD;
 		skbdesc->flags |= SKBDESC_L2_PADDED;
 	}
@@ -2208,402 +671,33 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
  */
 static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 {
-	u16 word;
-	u8 *mac;
-	u8 default_lna_gain;
-
-	rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE);
-
-	/*
-	 * Start validation of the data that has been read.
-	 */
-	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
-	if (!is_valid_ether_addr(mac)) {
-		random_ether_addr(mac);
-		EEPROM(rt2x00dev, "MAC: %pM\n", mac);
-	}
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
-	if (word == 0xffff) {
-		rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
-		rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
-		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
-		EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
-	} else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
-		/*
-		 * There is a maximum of 2 RX streams for the RT2870 series.
-		 */
-		if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
-			rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
-	}
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
-	if (word == 0xffff) {
-		rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
-		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
-		EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
-	}
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
-	if ((word & 0x00ff) == 0x00ff) {
-		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
-		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
-				   LED_MODE_TXRX_ACTIVITY);
-		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
-		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
-		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
-	}
-
-	/*
-	 * During the LNA validation we are going to use
-	 * lna0 as the correct value. Note that EEPROM_LNA
-	 * is never validated.
-	 */
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
-	default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
-	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
-		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
-	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
-		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
-	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
-	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
-		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
-	if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
-	    rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
-		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
-				   default_lna_gain);
-	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
-	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
-		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
-	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
-		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
-	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
-	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
-		rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
-	if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
-	    rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
-		rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
-				   default_lna_gain);
-	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+	if (rt2800_efuse_detect(rt2x00dev))
+		rt2800_read_eeprom_efuse(rt2x00dev);
+	else
+		rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
+				      EEPROM_SIZE);
 
-	return 0;
+	return rt2800_validate_eeprom(rt2x00dev);
 }
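
Illustrative sketch (not part of the patch): the EEPROM validation that rt2800_validate_eeprom() now performs, like the removed code above, goes through the rt2x00 16-bit field accessors. Conceptually these reduce to a bit_offset/bit_mask pair plus a mask-and-shift, the same pair the removed rt2800usb_conf_tx() further down builds by hand; the names below are hypothetical.

/* Minimal sketch of the mask/shift accessors; u16 comes from <linux/types.h>. */
struct sketch_field16 {
	u16 bit_offset;		/* lowest bit of the field within the word */
	u16 bit_mask;		/* contiguous mask covering the field */
};

static inline u16 sketch_get_field16(u16 word, struct sketch_field16 field)
{
	return (word & field.bit_mask) >> field.bit_offset;
}

static inline void sketch_set_field16(u16 *word, struct sketch_field16 field,
				      u16 value)
{
	*word &= ~field.bit_mask;
	*word |= (value << field.bit_offset) & field.bit_mask;
}
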
 
-static int rt2800usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
-{
-	u32 reg;
-	u16 value;
-	u16 eeprom;
-
-	/*
-	 * Read EEPROM word for configuration.
-	 */
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
-
-	/*
-	 * Identify RF chipset.
-	 */
-	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
-	rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
-	rt2x00_set_chip(rt2x00dev, RT2870, value, reg);
-
-	/*
-	 * The check for rt2860 is not a typo; some rt2870 hardware
-	 * identifies itself as rt2860 in the CSR register.
-	 */
-	if (!rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28600000) &&
-	    !rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28700000) &&
-	    !rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28800000) &&
-	    !rt2x00_check_rev(&rt2x00dev->chip, 0xffff0000, 0x30700000)) {
-		ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
-		return -ENODEV;
-	}
-
-	if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
-	    !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
-	    !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
-	    !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
-	    !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
-	    !rt2x00_rf(&rt2x00dev->chip, RF2020)) {
-		ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * Identify default antenna configuration.
-	 */
-	rt2x00dev->default_ant.tx =
-	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
-	rt2x00dev->default_ant.rx =
-	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);
-
-	/*
-	 * Read frequency offset and RF programming sequence.
-	 */
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
-	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
-
-	/*
-	 * Read external LNA information.
-	 */
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
-
-	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
-		__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
-	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
-		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
-
-	/*
-	 * Detect if this device has a hardware-controlled radio.
-	 */
-	if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
-		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
-
-	/*
-	 * Store led settings, for correct led behaviour.
-	 */
-#ifdef CONFIG_RT2X00_LIB_LEDS
-	rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
-	rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
-	rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
+static const struct rt2800_ops rt2800usb_rt2800_ops = {
+	.register_read		= rt2x00usb_register_read,
+	.register_read_lock	= rt2x00usb_register_read_lock,
+	.register_write		= rt2x00usb_register_write,
+	.register_write_lock	= rt2x00usb_register_write_lock,
 
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ,
-			   &rt2x00dev->led_mcu_reg);
-#endif /* CONFIG_RT2X00_LIB_LEDS */
+	.register_multiread	= rt2x00usb_register_multiread,
+	.register_multiwrite	= rt2x00usb_register_multiwrite,
 
-	return 0;
-}
-
-/*
- * RF value list for rt2870
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
- */
-static const struct rf_channel rf_vals[] = {
-	{ 1,  0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
-	{ 2,  0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
-	{ 3,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
-	{ 4,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
-	{ 5,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
-	{ 6,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
-	{ 7,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
-	{ 8,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
-	{ 9,  0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
-	{ 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
-	{ 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
-	{ 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
-	{ 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
-	{ 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },
-
-	/* 802.11 UNI / HyperLan 2 */
-	{ 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
-	{ 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
-	{ 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
-	{ 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
-	{ 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
-	{ 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
-	{ 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
-	{ 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
-	{ 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
-	{ 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
-	{ 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
-	{ 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },
-
-	/* 802.11 HyperLan 2 */
-	{ 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
-	{ 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
-	{ 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
-	{ 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
-	{ 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
-	{ 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
-	{ 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
-	{ 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
-	{ 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
-	{ 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
-	{ 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
-	{ 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
-	{ 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
-	{ 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
-	{ 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
-	{ 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },
-
-	/* 802.11 UNII */
-	{ 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
-	{ 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
-	{ 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
-	{ 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
-	{ 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
-	{ 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
-	{ 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
-	{ 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f },
-	{ 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 },
-	{ 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 },
-	{ 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f },
-
-	/* 802.11 Japan */
-	{ 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
-	{ 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
-	{ 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
-	{ 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
-	{ 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
-	{ 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
-	{ 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
-};
-
-/*
- * RF value list for rt3070
- * Supports: 2.4 GHz
- */
-static const struct rf_channel rf_vals_3070[] = {
-	{1,  241, 2, 2 },
-	{2,  241, 2, 7 },
-	{3,  242, 2, 2 },
-	{4,  242, 2, 7 },
-	{5,  243, 2, 2 },
-	{6,  243, 2, 7 },
-	{7,  244, 2, 2 },
-	{8,  244, 2, 7 },
-	{9,  245, 2, 2 },
-	{10, 245, 2, 7 },
-	{11, 246, 2, 2 },
-	{12, 246, 2, 7 },
-	{13, 247, 2, 2 },
-	{14, 248, 2, 4 },
+	.regbusy_read		= rt2x00usb_regbusy_read,
 };
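
Illustrative sketch (not part of the patch): the ops table above, which rt2800usb_probe_hw() below stores into rt2x00dev->priv, is how the bus-independent rt2800 code reaches the USB register accessors. The wrapper name here is hypothetical; only the indirection is the point.

static inline void sketch_rt2800_register_read(struct rt2x00_dev *rt2x00dev,
					       const unsigned int offset,
					       u32 *value)
{
	const struct rt2800_ops *ops = rt2x00dev->priv;

	/* Dispatch to the bus-specific accessor (rt2x00usb_register_read here). */
	ops->register_read(rt2x00dev, offset, value);
}
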
 
-static int rt2800usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
-{
-	struct hw_mode_spec *spec = &rt2x00dev->spec;
-	struct channel_info *info;
-	char *tx_power1;
-	char *tx_power2;
-	unsigned int i;
-	u16 eeprom;
-
-	/*
-	 * Initialize all hw fields.
-	 */
-	rt2x00dev->hw->flags =
-	    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-	    IEEE80211_HW_SIGNAL_DBM |
-	    IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_PS_NULLFUNC_STACK;
-	rt2x00dev->hw->extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
-
-	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
-	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
-				rt2x00_eeprom_addr(rt2x00dev,
-						   EEPROM_MAC_ADDR_0));
-
-	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
-
-	/*
-	 * Initialize HT information.
-	 */
-	spec->ht.ht_supported = true;
-	spec->ht.cap =
-	    IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
-	    IEEE80211_HT_CAP_GRN_FLD |
-	    IEEE80211_HT_CAP_SGI_20 |
-	    IEEE80211_HT_CAP_SGI_40 |
-	    IEEE80211_HT_CAP_TX_STBC |
-	    IEEE80211_HT_CAP_RX_STBC |
-	    IEEE80211_HT_CAP_PSMP_SUPPORT;
-	spec->ht.ampdu_factor = 3;
-	spec->ht.ampdu_density = 4;
-	spec->ht.mcs.tx_params =
-	    IEEE80211_HT_MCS_TX_DEFINED |
-	    IEEE80211_HT_MCS_TX_RX_DIFF |
-	    ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
-		IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-
-	switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
-	case 3:
-		spec->ht.mcs.rx_mask[2] = 0xff;
-	case 2:
-		spec->ht.mcs.rx_mask[1] = 0xff;
-	case 1:
-		spec->ht.mcs.rx_mask[0] = 0xff;
-		spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
-		break;
-	}
-
-	/*
-	 * Initialize hw_mode information.
-	 */
-	spec->supported_bands = SUPPORT_BAND_2GHZ;
-	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
-
-	if (rt2x00_rf(&rt2x00dev->chip, RF2820) ||
-	    rt2x00_rf(&rt2x00dev->chip, RF2720)) {
-		spec->num_channels = 14;
-		spec->channels = rf_vals;
-	} else if (rt2x00_rf(&rt2x00dev->chip, RF2850) ||
-		   rt2x00_rf(&rt2x00dev->chip, RF2750)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
-		spec->num_channels = ARRAY_SIZE(rf_vals);
-		spec->channels = rf_vals;
-	} else if (rt2x00_rf(&rt2x00dev->chip, RF3020) ||
-		   rt2x00_rf(&rt2x00dev->chip, RF2020)) {
-		spec->num_channels = ARRAY_SIZE(rf_vals_3070);
-		spec->channels = rf_vals_3070;
-	}
-
-	/*
-	 * Create channel information array
-	 */
-	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	spec->channels_info = info;
-
-	tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
-	tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
-
-	for (i = 0; i < 14; i++) {
-		info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
-		info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
-	}
-
-	if (spec->num_channels > 14) {
-		tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
-		tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
-
-		for (i = 14; i < spec->num_channels; i++) {
-			info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
-			info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
-		}
-	}
-
-	return 0;
-}
-
 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
 	int retval;
 
+	rt2x00dev->priv = (void *)&rt2800usb_rt2800_ops;
+
 	/*
 	 * Allocate eeprom data.
 	 */
@@ -2611,14 +705,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 	if (retval)
 		return retval;
 
-	retval = rt2800usb_init_eeprom(rt2x00dev);
+	retval = rt2800_init_eeprom(rt2x00dev);
 	if (retval)
 		return retval;
 
 	/*
 	 * Initialize hw specifications.
 	 */
-	retval = rt2800usb_probe_hw_mode(rt2x00dev);
+	retval = rt2800_probe_hw_mode(rt2x00dev);
 	if (retval)
 		return retval;
 
@@ -2645,162 +739,6 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 	return 0;
 }
 
-/*
- * IEEE80211 stack callback functions.
- */
-static void rt2800usb_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
-				   u32 *iv32, u16 *iv16)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-	struct mac_iveiv_entry iveiv_entry;
-	u32 offset;
-
-	offset = MAC_IVEIV_ENTRY(hw_key_idx);
-	rt2x00usb_register_multiread(rt2x00dev, offset,
-				      &iveiv_entry, sizeof(iveiv_entry));
-
-	memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
-	memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
-}
-
-static int rt2800usb_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-	u32 reg;
-	bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
-
-	rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
-	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
-	rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
-	rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
-	rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
-	rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
-	rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
-	rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
-	rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
-	rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
-
-	return 0;
-}
-
-static int rt2800usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
-			     const struct ieee80211_tx_queue_params *params)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-	struct data_queue *queue;
-	struct rt2x00_field32 field;
-	int retval;
-	u32 reg;
-	u32 offset;
-
-	/*
-	 * First pass the configuration through rt2x00lib, which will
-	 * update the queue settings and validate the input. After that
-	 * we are free to update the registers based on the value
-	 * in the queue parameter.
-	 */
-	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
-	if (retval)
-		return retval;
-
-	/*
-	 * We only need to perform additional register initialization
-	 * for WMM queues.
-	 */
-	if (queue_idx >= 4)
-		return 0;
-
-	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
-
-	/* Update WMM TXOP register */
-	offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
-	field.bit_offset = (queue_idx & 1) * 16;
-	field.bit_mask = 0xffff << field.bit_offset;
-
-	rt2x00usb_register_read(rt2x00dev, offset, &reg);
-	rt2x00_set_field32(&reg, field, queue->txop);
-	rt2x00usb_register_write(rt2x00dev, offset, reg);
-
-	/* Update WMM registers */
-	field.bit_offset = queue_idx * 4;
-	field.bit_mask = 0xf << field.bit_offset;
-
-	rt2x00usb_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
-	rt2x00_set_field32(&reg, field, queue->aifs);
-	rt2x00usb_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
-	rt2x00_set_field32(&reg, field, queue->cw_min);
-	rt2x00usb_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
-
-	rt2x00usb_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
-	rt2x00_set_field32(&reg, field, queue->cw_max);
-	rt2x00usb_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
-
-	/* Update EDCA registers */
-	offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
-
-	rt2x00usb_register_read(rt2x00dev, offset, &reg);
-	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
-	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
-	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
-	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
-	rt2x00usb_register_write(rt2x00dev, offset, reg);
-
-	return 0;
-}
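
To make the offset arithmetic in the removed rt2800usb_conf_tx() above concrete, here is one worked case (illustrative only), using the register map this patch removes from rt2800usb.h further down:

/*
 * queue_idx = 2:
 *   TXOP register:  WMM_TXOP0_CFG + sizeof(u32) * !!(2 & 2)
 *                   = 0x0220 + 4 = 0x0224 (WMM_TXOP1_CFG),
 *                   halfword (2 & 1) * 16 = bits 15:0 (AC2TXOP)
 *   WMM fields:     bit_offset = 2 * 4 = 8, bit_mask = 0x00000f00
 *                   -> AIFSN2 / CWMIN2 / CWMAX2
 *   EDCA register:  EDCA_AC0_CFG + sizeof(u32) * 2 = 0x1300 + 8
 *                   = 0x1308 (EDCA_AC2_CFG)
 */
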
-
-static u64 rt2800usb_get_tsf(struct ieee80211_hw *hw)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-	u64 tsf;
-	u32 reg;
-
-	rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
-	tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
-	rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
-	tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
-
-	return tsf;
-}
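
Stripped of the register access, the 64-bit TSF assembly in the removed rt2800usb_get_tsf() above is just the following (sketch, hypothetical helper name):

static inline u64 sketch_tsf64(u32 tsf_dw1_high_word, u32 tsf_dw0_low_word)
{
	/* TSF_TIMER_DW1 holds the msb word, TSF_TIMER_DW0 the lsb word. */
	return ((u64)tsf_dw1_high_word << 32) | tsf_dw0_low_word;
}
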
-
-static const struct ieee80211_ops rt2800usb_mac80211_ops = {
-	.tx			= rt2x00mac_tx,
-	.start			= rt2x00mac_start,
-	.stop			= rt2x00mac_stop,
-	.add_interface		= rt2x00mac_add_interface,
-	.remove_interface	= rt2x00mac_remove_interface,
-	.config			= rt2x00mac_config,
-	.configure_filter	= rt2x00mac_configure_filter,
-	.set_tim		= rt2x00mac_set_tim,
-	.set_key		= rt2x00mac_set_key,
-	.get_stats		= rt2x00mac_get_stats,
-	.get_tkip_seq		= rt2800usb_get_tkip_seq,
-	.set_rts_threshold	= rt2800usb_set_rts_threshold,
-	.bss_info_changed	= rt2x00mac_bss_info_changed,
-	.conf_tx		= rt2800usb_conf_tx,
-	.get_tx_stats		= rt2x00mac_get_tx_stats,
-	.get_tsf		= rt2800usb_get_tsf,
-	.rfkill_poll		= rt2x00mac_rfkill_poll,
-};
-
 static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
 	.probe_hw		= rt2800usb_probe_hw,
 	.get_firmware_name	= rt2800usb_get_firmware_name,
@@ -2810,10 +748,10 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
 	.uninitialize		= rt2x00usb_uninitialize,
 	.clear_entry		= rt2x00usb_clear_entry,
 	.set_device_state	= rt2800usb_set_device_state,
-	.rfkill_poll		= rt2800usb_rfkill_poll,
-	.link_stats		= rt2800usb_link_stats,
-	.reset_tuner		= rt2800usb_reset_tuner,
-	.link_tuner		= rt2800usb_link_tuner,
+	.rfkill_poll		= rt2800_rfkill_poll,
+	.link_stats		= rt2800_link_stats,
+	.reset_tuner		= rt2800_reset_tuner,
+	.link_tuner		= rt2800_link_tuner,
 	.write_tx_desc		= rt2800usb_write_tx_desc,
 	.write_tx_data		= rt2x00usb_write_tx_data,
 	.write_beacon		= rt2800usb_write_beacon,
@@ -2821,19 +759,19 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
 	.kick_tx_queue		= rt2800usb_kick_tx_queue,
 	.kill_tx_queue		= rt2x00usb_kill_tx_queue,
 	.fill_rxdone		= rt2800usb_fill_rxdone,
-	.config_shared_key	= rt2800usb_config_shared_key,
-	.config_pairwise_key	= rt2800usb_config_pairwise_key,
-	.config_filter		= rt2800usb_config_filter,
-	.config_intf		= rt2800usb_config_intf,
-	.config_erp		= rt2800usb_config_erp,
-	.config_ant		= rt2800usb_config_ant,
-	.config			= rt2800usb_config,
+	.config_shared_key	= rt2800_config_shared_key,
+	.config_pairwise_key	= rt2800_config_pairwise_key,
+	.config_filter		= rt2800_config_filter,
+	.config_intf		= rt2800_config_intf,
+	.config_erp		= rt2800_config_erp,
+	.config_ant		= rt2800_config_ant,
+	.config			= rt2800_config,
 };
 
 static const struct data_queue_desc rt2800usb_queue_rx = {
 	.entry_num		= RX_ENTRIES,
 	.data_size		= AGGREGATION_SIZE,
-	.desc_size		= RXD_DESC_SIZE + RXWI_DESC_SIZE,
+	.desc_size		= RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
 	.priv_size		= sizeof(struct queue_entry_priv_usb),
 };
 
@@ -2852,19 +790,20 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
 };
 
 static const struct rt2x00_ops rt2800usb_ops = {
-	.name		= KBUILD_MODNAME,
-	.max_sta_intf	= 1,
-	.max_ap_intf	= 8,
-	.eeprom_size	= EEPROM_SIZE,
-	.rf_size	= RF_SIZE,
-	.tx_queues	= NUM_TX_QUEUES,
-	.rx		= &rt2800usb_queue_rx,
-	.tx		= &rt2800usb_queue_tx,
-	.bcn		= &rt2800usb_queue_bcn,
-	.lib		= &rt2800usb_rt2x00_ops,
-	.hw		= &rt2800usb_mac80211_ops,
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 8,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
+	.rx			= &rt2800usb_queue_rx,
+	.tx			= &rt2800usb_queue_tx,
+	.bcn			= &rt2800usb_queue_bcn,
+	.lib			= &rt2800usb_rt2x00_ops,
+	.hw			= &rt2800_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
-	.debugfs	= &rt2800usb_rt2x00debug,
+	.debugfs		= &rt2800_rt2x00debug,
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
 
@@ -2886,17 +825,23 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Amit */
 	{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
+	/* Askey */
+	{ USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* ASUS */
 	{ USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* AzureWave */
 	{ USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Belkin */
 	{ USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2905,6 +850,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	/* Buffalo */
 	{ USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
+	/* Cisco */
+	{ USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Conceptronic */
 	{ USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2920,6 +867,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* D-Link */
@@ -2931,18 +880,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Edimax */
 	{ USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Encore */
 	{ USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* EnGenius */
 	{ USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Gemtek */
 	{ USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2956,7 +911,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* I-O DATA */
+	{ USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* LevelOne */
 	{ USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2971,8 +929,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	/* Motorola */
 	{ USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
+	/* MSI */
+	{ USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Ovislink */
 	{ USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+	/* Para */
+	{ USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Pegatron */
 	{ USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2988,8 +956,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	/* Quanta */
 	{ USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Ralink */
-	{ USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
-	{ USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -3013,7 +979,12 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* SMC */
 	{ USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -3022,6 +993,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Sparklan */
@@ -3039,6 +1012,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Zyxel */
 	{ USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 4d9991c9a51c..1e4340a182ef 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -1,5 +1,9 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+	Copyright (C) 2009 Axel Kollhofer <rain_maker@root-forum.org>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -28,288 +32,10 @@
 #define RT2800USB_H
 
 /*
- * RF chip defines.
- *
- * RF2820 2.4G 2T3R
- * RF2850 2.4G/5G 2T3R
- * RF2720 2.4G 1T2R
- * RF2750 2.4G/5G 1T2R
- * RF3020 2.4G 1T1R
- * RF2020 2.4G B/G
- * RF3021 2.4G 1T2R
- * RF3022 2.4G 2T2R
- * RF3052 2.4G 2T2R
- */
-#define RF2820				0x0001
-#define RF2850				0x0002
-#define RF2720				0x0003
-#define RF2750				0x0004
-#define RF3020				0x0005
-#define RF2020				0x0006
-#define RF3021				0x0007
-#define RF3022				0x0008
-#define RF3052				0x0009
-
-/*
- * RT2870 version
- */
-#define RT2860C_VERSION			0x28600100
-#define RT2860D_VERSION			0x28600101
-#define RT2880E_VERSION			0x28720200
-#define RT2883_VERSION			0x28830300
-#define RT3070_VERSION			0x30700200
-
-/*
- * Signal information.
- * Default offset is required for RSSI <-> dBm conversion.
- */
-#define DEFAULT_RSSI_OFFSET		120 /* FIXME */
-
-/*
- * Register layout information.
- */
-#define CSR_REG_BASE			0x1000
-#define CSR_REG_SIZE			0x0800
-#define EEPROM_BASE			0x0000
-#define EEPROM_SIZE			0x0110
-#define BBP_BASE			0x0000
-#define BBP_SIZE			0x0080
-#define RF_BASE				0x0004
-#define RF_SIZE				0x0010
-
-/*
- * Number of TX queues.
- */
-#define NUM_TX_QUEUES			4
-
-/*
  * USB registers.
  */
 
 /*
- * HOST-MCU shared memory
- */
-#define HOST_CMD_CSR			0x0404
-#define HOST_CMD_CSR_HOST_COMMAND	FIELD32(0x000000ff)
-
-/*
- * INT_SOURCE_CSR: Interrupt source register.
- * Write one to clear corresponding bit.
- * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
- */
-#define INT_SOURCE_CSR			0x0200
-#define INT_SOURCE_CSR_RXDELAYINT	FIELD32(0x00000001)
-#define INT_SOURCE_CSR_TXDELAYINT	FIELD32(0x00000002)
-#define INT_SOURCE_CSR_RX_DONE		FIELD32(0x00000004)
-#define INT_SOURCE_CSR_AC0_DMA_DONE	FIELD32(0x00000008)
-#define INT_SOURCE_CSR_AC1_DMA_DONE	FIELD32(0x00000010)
-#define INT_SOURCE_CSR_AC2_DMA_DONE	FIELD32(0x00000020)
-#define INT_SOURCE_CSR_AC3_DMA_DONE	FIELD32(0x00000040)
-#define INT_SOURCE_CSR_HCCA_DMA_DONE	FIELD32(0x00000080)
-#define INT_SOURCE_CSR_MGMT_DMA_DONE	FIELD32(0x00000100)
-#define INT_SOURCE_CSR_MCU_COMMAND	FIELD32(0x00000200)
-#define INT_SOURCE_CSR_RXTX_COHERENT	FIELD32(0x00000400)
-#define INT_SOURCE_CSR_TBTT		FIELD32(0x00000800)
-#define INT_SOURCE_CSR_PRE_TBTT		FIELD32(0x00001000)
-#define INT_SOURCE_CSR_TX_FIFO_STATUS	FIELD32(0x00002000)
-#define INT_SOURCE_CSR_AUTO_WAKEUP	FIELD32(0x00004000)
-#define INT_SOURCE_CSR_GPTIMER		FIELD32(0x00008000)
-#define INT_SOURCE_CSR_RX_COHERENT	FIELD32(0x00010000)
-#define INT_SOURCE_CSR_TX_COHERENT	FIELD32(0x00020000)
-
-/*
- * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is masked off.
- */
-#define INT_MASK_CSR			0x0204
-#define INT_MASK_CSR_RXDELAYINT		FIELD32(0x00000001)
-#define INT_MASK_CSR_TXDELAYINT		FIELD32(0x00000002)
-#define INT_MASK_CSR_RX_DONE		FIELD32(0x00000004)
-#define INT_MASK_CSR_AC0_DMA_DONE	FIELD32(0x00000008)
-#define INT_MASK_CSR_AC1_DMA_DONE	FIELD32(0x00000010)
-#define INT_MASK_CSR_AC2_DMA_DONE	FIELD32(0x00000020)
-#define INT_MASK_CSR_AC3_DMA_DONE	FIELD32(0x00000040)
-#define INT_MASK_CSR_HCCA_DMA_DONE	FIELD32(0x00000080)
-#define INT_MASK_CSR_MGMT_DMA_DONE	FIELD32(0x00000100)
-#define INT_MASK_CSR_MCU_COMMAND	FIELD32(0x00000200)
-#define INT_MASK_CSR_RXTX_COHERENT	FIELD32(0x00000400)
-#define INT_MASK_CSR_TBTT		FIELD32(0x00000800)
-#define INT_MASK_CSR_PRE_TBTT		FIELD32(0x00001000)
-#define INT_MASK_CSR_TX_FIFO_STATUS	FIELD32(0x00002000)
-#define INT_MASK_CSR_AUTO_WAKEUP	FIELD32(0x00004000)
-#define INT_MASK_CSR_GPTIMER		FIELD32(0x00008000)
-#define INT_MASK_CSR_RX_COHERENT	FIELD32(0x00010000)
-#define INT_MASK_CSR_TX_COHERENT	FIELD32(0x00020000)
-
-/*
- * WPDMA_GLO_CFG
- */
-#define WPDMA_GLO_CFG 			0x0208
-#define WPDMA_GLO_CFG_ENABLE_TX_DMA	FIELD32(0x00000001)
-#define WPDMA_GLO_CFG_TX_DMA_BUSY    	FIELD32(0x00000002)
-#define WPDMA_GLO_CFG_ENABLE_RX_DMA	FIELD32(0x00000004)
-#define WPDMA_GLO_CFG_RX_DMA_BUSY	FIELD32(0x00000008)
-#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE	FIELD32(0x00000030)
-#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE	FIELD32(0x00000040)
-#define WPDMA_GLO_CFG_BIG_ENDIAN	FIELD32(0x00000080)
-#define WPDMA_GLO_CFG_RX_HDR_SCATTER	FIELD32(0x0000ff00)
-#define WPDMA_GLO_CFG_HDR_SEG_LEN	FIELD32(0xffff0000)
-
-/*
- * WPDMA_RST_IDX
- */
-#define WPDMA_RST_IDX 			0x020c
-#define WPDMA_RST_IDX_DTX_IDX0		FIELD32(0x00000001)
-#define WPDMA_RST_IDX_DTX_IDX1		FIELD32(0x00000002)
-#define WPDMA_RST_IDX_DTX_IDX2		FIELD32(0x00000004)
-#define WPDMA_RST_IDX_DTX_IDX3		FIELD32(0x00000008)
-#define WPDMA_RST_IDX_DTX_IDX4		FIELD32(0x00000010)
-#define WPDMA_RST_IDX_DTX_IDX5		FIELD32(0x00000020)
-#define WPDMA_RST_IDX_DRX_IDX0		FIELD32(0x00010000)
-
-/*
- * DELAY_INT_CFG
- */
-#define DELAY_INT_CFG			0x0210
-#define DELAY_INT_CFG_RXMAX_PTIME	FIELD32(0x000000ff)
-#define DELAY_INT_CFG_RXMAX_PINT	FIELD32(0x00007f00)
-#define DELAY_INT_CFG_RXDLY_INT_EN	FIELD32(0x00008000)
-#define DELAY_INT_CFG_TXMAX_PTIME	FIELD32(0x00ff0000)
-#define DELAY_INT_CFG_TXMAX_PINT	FIELD32(0x7f000000)
-#define DELAY_INT_CFG_TXDLY_INT_EN	FIELD32(0x80000000)
-
-/*
- * WMM_AIFSN_CFG: Aifsn for each EDCA AC
- * AIFSN0: AC_BE
- * AIFSN1: AC_BK
- * AIFSN2: AC_VI
- * AIFSN3: AC_VO
- */
-#define WMM_AIFSN_CFG			0x0214
-#define WMM_AIFSN_CFG_AIFSN0		FIELD32(0x0000000f)
-#define WMM_AIFSN_CFG_AIFSN1		FIELD32(0x000000f0)
-#define WMM_AIFSN_CFG_AIFSN2		FIELD32(0x00000f00)
-#define WMM_AIFSN_CFG_AIFSN3		FIELD32(0x0000f000)
-
-/*
- * WMM_CWMIN_CFG: CWmin for each EDCA AC
- * CWMIN0: AC_BE
- * CWMIN1: AC_BK
- * CWMIN2: AC_VI
- * CWMIN3: AC_VO
- */
-#define WMM_CWMIN_CFG			0x0218
-#define WMM_CWMIN_CFG_CWMIN0		FIELD32(0x0000000f)
-#define WMM_CWMIN_CFG_CWMIN1		FIELD32(0x000000f0)
-#define WMM_CWMIN_CFG_CWMIN2		FIELD32(0x00000f00)
-#define WMM_CWMIN_CFG_CWMIN3		FIELD32(0x0000f000)
-
-/*
- * WMM_CWMAX_CFG: CWmax for each EDCA AC
- * CWMAX0: AC_BE
- * CWMAX1: AC_BK
- * CWMAX2: AC_VI
- * CWMAX3: AC_VO
- */
-#define WMM_CWMAX_CFG			0x021c
-#define WMM_CWMAX_CFG_CWMAX0		FIELD32(0x0000000f)
-#define WMM_CWMAX_CFG_CWMAX1		FIELD32(0x000000f0)
-#define WMM_CWMAX_CFG_CWMAX2		FIELD32(0x00000f00)
-#define WMM_CWMAX_CFG_CWMAX3		FIELD32(0x0000f000)
-
-/*
- * AC_TXOP0: AC_BK/AC_BE TXOP register
- * AC0TXOP: AC_BK in unit of 32us
- * AC1TXOP: AC_BE in unit of 32us
- */
-#define WMM_TXOP0_CFG			0x0220
-#define WMM_TXOP0_CFG_AC0TXOP		FIELD32(0x0000ffff)
-#define WMM_TXOP0_CFG_AC1TXOP		FIELD32(0xffff0000)
-
-/*
- * AC_TXOP1: AC_VO/AC_VI TXOP register
- * AC2TXOP: AC_VI in unit of 32us
- * AC3TXOP: AC_VO in unit of 32us
- */
-#define WMM_TXOP1_CFG			0x0224
-#define WMM_TXOP1_CFG_AC2TXOP		FIELD32(0x0000ffff)
-#define WMM_TXOP1_CFG_AC3TXOP		FIELD32(0xffff0000)
-
-/*
- * GPIO_CTRL_CFG:
- */
-#define GPIO_CTRL_CFG			0x0228
-#define GPIO_CTRL_CFG_BIT0		FIELD32(0x00000001)
-#define GPIO_CTRL_CFG_BIT1		FIELD32(0x00000002)
-#define GPIO_CTRL_CFG_BIT2		FIELD32(0x00000004)
-#define GPIO_CTRL_CFG_BIT3		FIELD32(0x00000008)
-#define GPIO_CTRL_CFG_BIT4		FIELD32(0x00000010)
-#define GPIO_CTRL_CFG_BIT5		FIELD32(0x00000020)
-#define GPIO_CTRL_CFG_BIT6		FIELD32(0x00000040)
-#define GPIO_CTRL_CFG_BIT7		FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_BIT8		FIELD32(0x00000100)
-
-/*
- * MCU_CMD_CFG
- */
-#define MCU_CMD_CFG			0x022c
-
-/*
- * AC_BK register offsets
- */
-#define TX_BASE_PTR0			0x0230
-#define TX_MAX_CNT0			0x0234
-#define TX_CTX_IDX0			0x0238
-#define TX_DTX_IDX0			0x023c
-
-/*
- * AC_BE register offsets
- */
-#define TX_BASE_PTR1			0x0240
-#define TX_MAX_CNT1			0x0244
-#define TX_CTX_IDX1			0x0248
-#define TX_DTX_IDX1			0x024c
-
-/*
- * AC_VI register offsets
- */
-#define TX_BASE_PTR2			0x0250
-#define TX_MAX_CNT2			0x0254
-#define TX_CTX_IDX2			0x0258
-#define TX_DTX_IDX2			0x025c
-
-/*
- * AC_VO register offsets
- */
-#define TX_BASE_PTR3			0x0260
-#define TX_MAX_CNT3			0x0264
-#define TX_CTX_IDX3			0x0268
-#define TX_DTX_IDX3			0x026c
-
-/*
- * HCCA register offsets
- */
-#define TX_BASE_PTR4			0x0270
-#define TX_MAX_CNT4			0x0274
-#define TX_CTX_IDX4			0x0278
-#define TX_DTX_IDX4			0x027c
-
-/*
- * MGMT register offsets
- */
-#define TX_BASE_PTR5			0x0280
-#define TX_MAX_CNT5			0x0284
-#define TX_CTX_IDX5			0x0288
-#define TX_DTX_IDX5			0x028c
-
-/*
- * RX register offsets
- */
-#define RX_BASE_PTR			0x0290
-#define RX_MAX_CNT			0x0294
-#define RX_CRX_IDX			0x0298
-#define RX_DRX_IDX			0x029c
-
-/*
  * USB_DMA_CFG
  * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
  * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
@@ -343,1448 +69,16 @@
 #define USB_CYC_CFG_CLOCK_CYCLE		FIELD32(0x000000ff)
 
 /*
- * PBF_SYS_CTRL
- * HOST_RAM_WRITE: enable Host program ram write selection
- */
-#define PBF_SYS_CTRL			0x0400
-#define PBF_SYS_CTRL_READY		FIELD32(0x00000080)
-#define PBF_SYS_CTRL_HOST_RAM_WRITE	FIELD32(0x00010000)
-
-/*
- * PBF registers
- * Most are for debug. Driver doesn't touch PBF register.
- */
-#define PBF_CFG				0x0408
-#define PBF_MAX_PCNT			0x040c
-#define PBF_CTRL			0x0410
-#define PBF_INT_STA			0x0414
-#define PBF_INT_ENA			0x0418
-
-/*
- * BCN_OFFSET0:
- */
-#define BCN_OFFSET0			0x042c
-#define BCN_OFFSET0_BCN0		FIELD32(0x000000ff)
-#define BCN_OFFSET0_BCN1		FIELD32(0x0000ff00)
-#define BCN_OFFSET0_BCN2		FIELD32(0x00ff0000)
-#define BCN_OFFSET0_BCN3		FIELD32(0xff000000)
-
-/*
- * BCN_OFFSET1:
- */
-#define BCN_OFFSET1			0x0430
-#define BCN_OFFSET1_BCN4		FIELD32(0x000000ff)
-#define BCN_OFFSET1_BCN5		FIELD32(0x0000ff00)
-#define BCN_OFFSET1_BCN6		FIELD32(0x00ff0000)
-#define BCN_OFFSET1_BCN7		FIELD32(0xff000000)
-
-/*
- * PBF registers
- * Most are for debug. Driver doesn't touch PBF register.
- */
-#define TXRXQ_PCNT			0x0438
-#define PBF_DBG				0x043c
-
-/*
- * RF registers
- */
-#define	RF_CSR_CFG			0x0500
-#define RF_CSR_CFG_DATA			FIELD32(0x000000ff)
-#define RF_CSR_CFG_REGNUM		FIELD32(0x00001f00)
-#define RF_CSR_CFG_WRITE		FIELD32(0x00010000)
-#define RF_CSR_CFG_BUSY			FIELD32(0x00020000)
-
-/*
- * MAC Control/Status Registers(CSR).
- * Some values are set in TU, where 1 TU == 1024 us.
- */
-
-/*
- * MAC_CSR0: ASIC revision number.
- * ASIC_REV: 0
- * ASIC_VER: 2870
- */
-#define MAC_CSR0			0x1000
-#define MAC_CSR0_ASIC_REV		FIELD32(0x0000ffff)
-#define MAC_CSR0_ASIC_VER		FIELD32(0xffff0000)
-
-/*
- * MAC_SYS_CTRL:
- */
-#define MAC_SYS_CTRL			0x1004
-#define MAC_SYS_CTRL_RESET_CSR		FIELD32(0x00000001)
-#define MAC_SYS_CTRL_RESET_BBP		FIELD32(0x00000002)
-#define MAC_SYS_CTRL_ENABLE_TX		FIELD32(0x00000004)
-#define MAC_SYS_CTRL_ENABLE_RX		FIELD32(0x00000008)
-#define MAC_SYS_CTRL_CONTINUOUS_TX	FIELD32(0x00000010)
-#define MAC_SYS_CTRL_LOOPBACK		FIELD32(0x00000020)
-#define MAC_SYS_CTRL_WLAN_HALT		FIELD32(0x00000040)
-#define MAC_SYS_CTRL_RX_TIMESTAMP	FIELD32(0x00000080)
-
-/*
- * MAC_ADDR_DW0: STA MAC register 0
- */
-#define MAC_ADDR_DW0			0x1008
-#define MAC_ADDR_DW0_BYTE0		FIELD32(0x000000ff)
-#define MAC_ADDR_DW0_BYTE1		FIELD32(0x0000ff00)
-#define MAC_ADDR_DW0_BYTE2		FIELD32(0x00ff0000)
-#define MAC_ADDR_DW0_BYTE3		FIELD32(0xff000000)
-
-/*
- * MAC_ADDR_DW1: STA MAC register 1
- * UNICAST_TO_ME_MASK:
- * Used to mask off bits from byte 5 of the MAC address
- * to determine the UNICAST_TO_ME bit for RX frames.
- * The full mask is complemented by BSS_ID_MASK:
- *    MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
- */
-#define MAC_ADDR_DW1			0x100c
-#define MAC_ADDR_DW1_BYTE4		FIELD32(0x000000ff)
-#define MAC_ADDR_DW1_BYTE5		FIELD32(0x0000ff00)
-#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK	FIELD32(0x00ff0000)
-
-/*
- * MAC_BSSID_DW0: BSSID register 0
- */
-#define MAC_BSSID_DW0			0x1010
-#define MAC_BSSID_DW0_BYTE0		FIELD32(0x000000ff)
-#define MAC_BSSID_DW0_BYTE1		FIELD32(0x0000ff00)
-#define MAC_BSSID_DW0_BYTE2		FIELD32(0x00ff0000)
-#define MAC_BSSID_DW0_BYTE3		FIELD32(0xff000000)
-
-/*
- * MAC_BSSID_DW1: BSSID register 1
- * BSS_ID_MASK:
- *     0: 1-BSSID mode (BSS index = 0)
- *     1: 2-BSSID mode (BSS index: Byte5, bit 0)
- *     2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
- *     3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
- * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
- * BSSID. This will make sure that those bits will be ignored
- * when determining the MY_BSS of RX frames.
- */
-#define MAC_BSSID_DW1			0x1014
-#define MAC_BSSID_DW1_BYTE4		FIELD32(0x000000ff)
-#define MAC_BSSID_DW1_BYTE5		FIELD32(0x0000ff00)
-#define MAC_BSSID_DW1_BSS_ID_MASK	FIELD32(0x00030000)
-#define MAC_BSSID_DW1_BSS_BCN_NUM	FIELD32(0x001c0000)
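
A worked example of the BSS_ID_MASK modes documented above (addresses are illustrative):

/*
 * BSS_ID_MASK = 3 (8-BSSID mode) ignores bits 0-2 of BSSID byte 5, so with
 * a programmed BSSID of 00:11:22:33:44:50 any received BSSID between
 * 00:11:22:33:44:50 and 00:11:22:33:44:57 still counts as MY_BSS, which is
 * what ends up in RXINFO_W0_MY_BSS / RXDONE_MY_BSS in the fill_rxdone
 * change at the top of this patch.
 */
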
-
-/*
- * MAX_LEN_CFG: Maximum frame length register.
- * MAX_MPDU: rt2860b max 16k bytes
- * MAX_PSDU: Maximum PSDU length
- *	(power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
- */
-#define MAX_LEN_CFG			0x1018
-#define MAX_LEN_CFG_MAX_MPDU		FIELD32(0x00000fff)
-#define MAX_LEN_CFG_MAX_PSDU		FIELD32(0x00003000)
-#define MAX_LEN_CFG_MIN_PSDU		FIELD32(0x0000c000)
-#define MAX_LEN_CFG_MIN_MPDU		FIELD32(0x000f0000)
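
A worked reading of the MAX_PSDU power-factor encoding above (values illustrative):

/* MAX_PSDU = 0 -> 2^13 = 8192 bytes; MAX_PSDU = 2 -> 2^15 = 32768 bytes. */
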
-
-/*
- * BBP_CSR_CFG: BBP serial control register
- * VALUE: Register value to program into BBP
- * REG_NUM: Selected BBP register
- * READ_CONTROL: 0 write BBP, 1 read BBP
- * BUSY: ASIC is busy executing BBP commands
- * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
- * BBP_RW_MODE: 0 serial, 1 parallel
- */
-#define BBP_CSR_CFG			0x101c
-#define BBP_CSR_CFG_VALUE		FIELD32(0x000000ff)
-#define BBP_CSR_CFG_REGNUM		FIELD32(0x0000ff00)
-#define BBP_CSR_CFG_READ_CONTROL	FIELD32(0x00010000)
-#define BBP_CSR_CFG_BUSY		FIELD32(0x00020000)
-#define BBP_CSR_CFG_BBP_PAR_DUR		FIELD32(0x00040000)
-#define BBP_CSR_CFG_BBP_RW_MODE		FIELD32(0x00080000)
-
-/*
- * RF_CSR_CFG0: RF control register
- * REGID_AND_VALUE: Register value to program into RF
- * BITWIDTH: Selected RF register
- * STANDBYMODE: 0 high when standby, 1 low when standby
- * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
- * BUSY: ASIC is busy executing RF commands
- */
-#define RF_CSR_CFG0			0x1020
-#define RF_CSR_CFG0_REGID_AND_VALUE	FIELD32(0x00ffffff)
-#define RF_CSR_CFG0_BITWIDTH		FIELD32(0x1f000000)
-#define RF_CSR_CFG0_REG_VALUE_BW	FIELD32(0x1fffffff)
-#define RF_CSR_CFG0_STANDBYMODE		FIELD32(0x20000000)
-#define RF_CSR_CFG0_SEL			FIELD32(0x40000000)
-#define RF_CSR_CFG0_BUSY		FIELD32(0x80000000)
-
-/*
- * RF_CSR_CFG1: RF control register
- * REGID_AND_VALUE: Register value to program into RF
- * RFGAP: Gap between BB_CONTROL_RF and RF_LE
- *        0: 3 system clock cycle (37.5usec)
- *        1: 5 system clock cycle (62.5usec)
- */
-#define RF_CSR_CFG1			0x1024
-#define RF_CSR_CFG1_REGID_AND_VALUE	FIELD32(0x00ffffff)
-#define RF_CSR_CFG1_RFGAP		FIELD32(0x1f000000)
-
-/*
- * RF_CSR_CFG2: RF control register
- * VALUE: Register value to program into RF
- * RFGAP: Gap between BB_CONTROL_RF and RF_LE
- *        0: 3 system clock cycle (37.5usec)
- *        1: 5 system clock cycle (62.5usec)
- */
-#define RF_CSR_CFG2			0x1028
-#define RF_CSR_CFG2_VALUE		FIELD32(0x00ffffff)
-
-/*
- * LED_CFG: LED control
- * color LED's:
- *   0: off
- *   1: blinking upon TX2
- *   2: periodic slow blinking
- *   3: always on
- * LED polarity:
- *   0: active low
- *   1: active high
- */
-#define LED_CFG				0x102c
-#define LED_CFG_ON_PERIOD		FIELD32(0x000000ff)
-#define LED_CFG_OFF_PERIOD		FIELD32(0x0000ff00)
-#define LED_CFG_SLOW_BLINK_PERIOD	FIELD32(0x003f0000)
-#define LED_CFG_R_LED_MODE		FIELD32(0x03000000)
-#define LED_CFG_G_LED_MODE		FIELD32(0x0c000000)
-#define LED_CFG_Y_LED_MODE		FIELD32(0x30000000)
-#define LED_CFG_LED_POLAR		FIELD32(0x40000000)
-
-/*
- * XIFS_TIME_CFG: MAC timing
- * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
- * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
- * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
- *	when MAC doesn't reference BBP signal BBRXEND
- * EIFS: unit 1us
- * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
- *
- */
-#define XIFS_TIME_CFG			0x1100
-#define XIFS_TIME_CFG_CCKM_SIFS_TIME	FIELD32(0x000000ff)
-#define XIFS_TIME_CFG_OFDM_SIFS_TIME	FIELD32(0x0000ff00)
-#define XIFS_TIME_CFG_OFDM_XIFS_TIME	FIELD32(0x000f0000)
-#define XIFS_TIME_CFG_EIFS		FIELD32(0x1ff00000)
-#define XIFS_TIME_CFG_BB_RXEND_ENABLE	FIELD32(0x20000000)
-
-/*
- * BKOFF_SLOT_CFG:
- */
-#define BKOFF_SLOT_CFG			0x1104
-#define BKOFF_SLOT_CFG_SLOT_TIME	FIELD32(0x000000ff)
-#define BKOFF_SLOT_CFG_CC_DELAY_TIME	FIELD32(0x0000ff00)
-
-/*
- * NAV_TIME_CFG:
- */
-#define NAV_TIME_CFG			0x1108
-#define NAV_TIME_CFG_SIFS		FIELD32(0x000000ff)
-#define NAV_TIME_CFG_SLOT_TIME		FIELD32(0x0000ff00)
-#define NAV_TIME_CFG_EIFS		FIELD32(0x01ff0000)
-#define NAV_TIME_ZERO_SIFS		FIELD32(0x02000000)
-
-/*
- * CH_TIME_CFG: count as channel busy
- */
-#define CH_TIME_CFG     	        0x110c
-
-/*
- * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
- */
-#define PBF_LIFE_TIMER     	        0x1110
-
-/*
- * BCN_TIME_CFG:
- * BEACON_INTERVAL: in unit of 1/16 TU
- * TSF_TICKING: Enable TSF auto counting
- * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
- * BEACON_GEN: Enable beacon generator
- */
-#define BCN_TIME_CFG			0x1114
-#define BCN_TIME_CFG_BEACON_INTERVAL	FIELD32(0x0000ffff)
-#define BCN_TIME_CFG_TSF_TICKING	FIELD32(0x00010000)
-#define BCN_TIME_CFG_TSF_SYNC		FIELD32(0x00060000)
-#define BCN_TIME_CFG_TBTT_ENABLE	FIELD32(0x00080000)
-#define BCN_TIME_CFG_BEACON_GEN		FIELD32(0x00100000)
-#define BCN_TIME_CFG_TX_TIME_COMPENSATE	FIELD32(0xf0000000)
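
A worked example for the BEACON_INTERVAL unit above, using 1 TU == 1024 us as noted at the start of the CSR section (value illustrative):

/* A common 100 TU beacon interval is programmed as 100 * 16 = 1600, */
/* i.e. 100 * 1024 us = 102.4 ms between beacons.                    */
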
-
-/*
- * TBTT_SYNC_CFG:
- */
-#define TBTT_SYNC_CFG			0x1118
-
-/*
- * TSF_TIMER_DW0: Local lsb TSF timer, read-only
- */
-#define TSF_TIMER_DW0			0x111c
-#define TSF_TIMER_DW0_LOW_WORD		FIELD32(0xffffffff)
-
-/*
- * TSF_TIMER_DW1: Local msb TSF timer, read-only
- */
-#define TSF_TIMER_DW1			0x1120
-#define TSF_TIMER_DW1_HIGH_WORD		FIELD32(0xffffffff)
-
-/*
- * TBTT_TIMER: Time remaining until the next TBTT, read-only
- */
-#define TBTT_TIMER			0x1124
-
-/*
- * INT_TIMER_CFG:
- */
-#define INT_TIMER_CFG			0x1128
-
-/*
- * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
- */
-#define INT_TIMER_EN			0x112c
-
-/*
- * CH_IDLE_STA: channel idle time
- */
-#define CH_IDLE_STA			0x1130
-
-/*
- * CH_BUSY_STA: channel busy time
- */
-#define CH_BUSY_STA			0x1134
-
-/*
- * MAC_STATUS_CFG:
- * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
- *	If 1 or higher, one of the 2 registers is busy.
- */
-#define MAC_STATUS_CFG			0x1200
-#define MAC_STATUS_CFG_BBP_RF_BUSY	FIELD32(0x00000003)
-
-/*
- * PWR_PIN_CFG:
- */
-#define PWR_PIN_CFG			0x1204
-
-/*
- * AUTOWAKEUP_CFG: Manual power control / status register
- * TBCN_BEFORE_WAKE: ForceWake has higher privilege than PutToSleep when both are set
- * AUTOWAKE: 0:sleep, 1:awake
- */
-#define AUTOWAKEUP_CFG			0x1208
-#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME	FIELD32(0x000000ff)
-#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE	FIELD32(0x00007f00)
-#define AUTOWAKEUP_CFG_AUTOWAKE		FIELD32(0x00008000)
-
-/*
- * EDCA_AC0_CFG:
- */
-#define EDCA_AC0_CFG			0x1300
-#define EDCA_AC0_CFG_TX_OP		FIELD32(0x000000ff)
-#define EDCA_AC0_CFG_AIFSN		FIELD32(0x00000f00)
-#define EDCA_AC0_CFG_CWMIN		FIELD32(0x0000f000)
-#define EDCA_AC0_CFG_CWMAX		FIELD32(0x000f0000)
-
-/*
- * EDCA_AC1_CFG:
- */
-#define EDCA_AC1_CFG			0x1304
-#define EDCA_AC1_CFG_TX_OP		FIELD32(0x000000ff)
-#define EDCA_AC1_CFG_AIFSN		FIELD32(0x00000f00)
-#define EDCA_AC1_CFG_CWMIN		FIELD32(0x0000f000)
-#define EDCA_AC1_CFG_CWMAX		FIELD32(0x000f0000)
-
-/*
- * EDCA_AC2_CFG:
- */
-#define EDCA_AC2_CFG			0x1308
-#define EDCA_AC2_CFG_TX_OP		FIELD32(0x000000ff)
-#define EDCA_AC2_CFG_AIFSN		FIELD32(0x00000f00)
-#define EDCA_AC2_CFG_CWMIN		FIELD32(0x0000f000)
-#define EDCA_AC2_CFG_CWMAX		FIELD32(0x000f0000)
-
-/*
- * EDCA_AC3_CFG:
- */
-#define EDCA_AC3_CFG			0x130c
-#define EDCA_AC3_CFG_TX_OP		FIELD32(0x000000ff)
-#define EDCA_AC3_CFG_AIFSN		FIELD32(0x00000f00)
-#define EDCA_AC3_CFG_CWMIN		FIELD32(0x0000f000)
-#define EDCA_AC3_CFG_CWMAX		FIELD32(0x000f0000)
-
-/*
- * EDCA_TID_AC_MAP:
- */
-#define EDCA_TID_AC_MAP			0x1310
-
-/*
- * TX_PWR_CFG_0:
- */
-#define TX_PWR_CFG_0			0x1314
-#define TX_PWR_CFG_0_1MBS		FIELD32(0x0000000f)
-#define TX_PWR_CFG_0_2MBS		FIELD32(0x000000f0)
-#define TX_PWR_CFG_0_55MBS		FIELD32(0x00000f00)
-#define TX_PWR_CFG_0_11MBS		FIELD32(0x0000f000)
-#define TX_PWR_CFG_0_6MBS		FIELD32(0x000f0000)
-#define TX_PWR_CFG_0_9MBS		FIELD32(0x00f00000)
-#define TX_PWR_CFG_0_12MBS		FIELD32(0x0f000000)
-#define TX_PWR_CFG_0_18MBS		FIELD32(0xf0000000)
-
-/*
- * TX_PWR_CFG_1:
- */
-#define TX_PWR_CFG_1			0x1318
-#define TX_PWR_CFG_1_24MBS		FIELD32(0x0000000f)
-#define TX_PWR_CFG_1_36MBS		FIELD32(0x000000f0)
-#define TX_PWR_CFG_1_48MBS		FIELD32(0x00000f00)
-#define TX_PWR_CFG_1_54MBS		FIELD32(0x0000f000)
-#define TX_PWR_CFG_1_MCS0		FIELD32(0x000f0000)
-#define TX_PWR_CFG_1_MCS1		FIELD32(0x00f00000)
-#define TX_PWR_CFG_1_MCS2		FIELD32(0x0f000000)
-#define TX_PWR_CFG_1_MCS3		FIELD32(0xf0000000)
-
-/*
- * TX_PWR_CFG_2:
- */
-#define TX_PWR_CFG_2			0x131c
-#define TX_PWR_CFG_2_MCS4		FIELD32(0x0000000f)
-#define TX_PWR_CFG_2_MCS5		FIELD32(0x000000f0)
-#define TX_PWR_CFG_2_MCS6		FIELD32(0x00000f00)
-#define TX_PWR_CFG_2_MCS7		FIELD32(0x0000f000)
-#define TX_PWR_CFG_2_MCS8		FIELD32(0x000f0000)
-#define TX_PWR_CFG_2_MCS9		FIELD32(0x00f00000)
-#define TX_PWR_CFG_2_MCS10		FIELD32(0x0f000000)
-#define TX_PWR_CFG_2_MCS11		FIELD32(0xf0000000)
-
-/*
- * TX_PWR_CFG_3:
- */
-#define TX_PWR_CFG_3			0x1320
-#define TX_PWR_CFG_3_MCS12		FIELD32(0x0000000f)
-#define TX_PWR_CFG_3_MCS13		FIELD32(0x000000f0)
-#define TX_PWR_CFG_3_MCS14		FIELD32(0x00000f00)
-#define TX_PWR_CFG_3_MCS15		FIELD32(0x0000f000)
-#define TX_PWR_CFG_3_UKNOWN1		FIELD32(0x000f0000)
-#define TX_PWR_CFG_3_UKNOWN2		FIELD32(0x00f00000)
-#define TX_PWR_CFG_3_UKNOWN3		FIELD32(0x0f000000)
-#define TX_PWR_CFG_3_UKNOWN4		FIELD32(0xf0000000)
-
-/*
- * TX_PWR_CFG_4:
- */
-#define TX_PWR_CFG_4			0x1324
-#define TX_PWR_CFG_4_UKNOWN5		FIELD32(0x0000000f)
-#define TX_PWR_CFG_4_UKNOWN6		FIELD32(0x000000f0)
-#define TX_PWR_CFG_4_UKNOWN7		FIELD32(0x00000f00)
-#define TX_PWR_CFG_4_UKNOWN8		FIELD32(0x0000f000)
-
-/*
- * TX_PIN_CFG:
- */
-#define TX_PIN_CFG			0x1328
-#define TX_PIN_CFG_PA_PE_A0_EN		FIELD32(0x00000001)
-#define TX_PIN_CFG_PA_PE_G0_EN		FIELD32(0x00000002)
-#define TX_PIN_CFG_PA_PE_A1_EN		FIELD32(0x00000004)
-#define TX_PIN_CFG_PA_PE_G1_EN		FIELD32(0x00000008)
-#define TX_PIN_CFG_PA_PE_A0_POL		FIELD32(0x00000010)
-#define TX_PIN_CFG_PA_PE_G0_POL		FIELD32(0x00000020)
-#define TX_PIN_CFG_PA_PE_A1_POL		FIELD32(0x00000040)
-#define TX_PIN_CFG_PA_PE_G1_POL		FIELD32(0x00000080)
-#define TX_PIN_CFG_LNA_PE_A0_EN		FIELD32(0x00000100)
-#define TX_PIN_CFG_LNA_PE_G0_EN		FIELD32(0x00000200)
-#define TX_PIN_CFG_LNA_PE_A1_EN		FIELD32(0x00000400)
-#define TX_PIN_CFG_LNA_PE_G1_EN		FIELD32(0x00000800)
-#define TX_PIN_CFG_LNA_PE_A0_POL	FIELD32(0x00001000)
-#define TX_PIN_CFG_LNA_PE_G0_POL	FIELD32(0x00002000)
-#define TX_PIN_CFG_LNA_PE_A1_POL	FIELD32(0x00004000)
-#define TX_PIN_CFG_LNA_PE_G1_POL	FIELD32(0x00008000)
-#define TX_PIN_CFG_RFTR_EN		FIELD32(0x00010000)
-#define TX_PIN_CFG_RFTR_POL		FIELD32(0x00020000)
-#define TX_PIN_CFG_TRSW_EN		FIELD32(0x00040000)
-#define TX_PIN_CFG_TRSW_POL		FIELD32(0x00080000)
-
-/*
- * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
- */
-#define TX_BAND_CFG			0x132c
-#define TX_BAND_CFG_HT40_PLUS		FIELD32(0x00000001)
-#define TX_BAND_CFG_A			FIELD32(0x00000002)
-#define TX_BAND_CFG_BG			FIELD32(0x00000004)
-
-/*
- * TX_SW_CFG0:
- */
-#define TX_SW_CFG0			0x1330
-
-/*
- * TX_SW_CFG1:
- */
-#define TX_SW_CFG1			0x1334
-
-/*
- * TX_SW_CFG2:
- */
-#define TX_SW_CFG2			0x1338
-
-/*
- * TXOP_THRES_CFG:
- */
-#define TXOP_THRES_CFG			0x133c
-
-/*
- * TXOP_CTRL_CFG:
- */
-#define TXOP_CTRL_CFG			0x1340
-
-/*
- * TX_RTS_CFG:
- * RTS_THRES: unit:byte
- * RTS_FBK_EN: enable rts rate fallback
- */
-#define TX_RTS_CFG			0x1344
-#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT	FIELD32(0x000000ff)
-#define TX_RTS_CFG_RTS_THRES		FIELD32(0x00ffff00)
-#define TX_RTS_CFG_RTS_FBK_EN		FIELD32(0x01000000)
-
-/*
- * TX_TIMEOUT_CFG:
- * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
- * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
- * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
- *                it is recommended that:
- *                (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
- */
-#define TX_TIMEOUT_CFG			0x1348
-#define TX_TIMEOUT_CFG_MPDU_LIFETIME	FIELD32(0x000000f0)
-#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT	FIELD32(0x0000ff00)
-#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT	FIELD32(0x00ff0000)
-
-/*
- * TX_RTY_CFG:
- * SHORT_RTY_LIMIT: short retry limit
- * LONG_RTY_LIMIT: long retry limit
- * LONG_RTY_THRE: Long retry threshold
- * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
- *                   0:expired by retry limit, 1: expired by mpdu life timer
- * AGG_RTY_MODE: Aggregate MPDU retry mode
- *               0:expired by retry limit, 1: expired by mpdu life timer
- * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
- */
-#define TX_RTY_CFG			0x134c
-#define TX_RTY_CFG_SHORT_RTY_LIMIT	FIELD32(0x000000ff)
-#define TX_RTY_CFG_LONG_RTY_LIMIT	FIELD32(0x0000ff00)
-#define TX_RTY_CFG_LONG_RTY_THRE	FIELD32(0x0fff0000)
-#define TX_RTY_CFG_NON_AGG_RTY_MODE	FIELD32(0x10000000)
-#define TX_RTY_CFG_AGG_RTY_MODE		FIELD32(0x20000000)
-#define TX_RTY_CFG_TX_AUTO_FB_ENABLE	FIELD32(0x40000000)
-
-/*
- * TX_LINK_CFG:
- * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
- * MFB_ENABLE: TX apply remote MFB 1:enable
- * REMOTE_UMFS_ENABLE: remote unsolicited MFB enable
- *                     0: do not apply remote unsolicited MFB (MFS=7)
- * TX_MRQ_EN: MCS request TX enable
- * TX_RDG_EN: RDG TX enable
- * TX_CF_ACK_EN: Piggyback CF-ACK enable
- * REMOTE_MFB: remote MCS feedback
- * REMOTE_MFS: remote MCS feedback sequence number
- */
-#define TX_LINK_CFG			0x1350
-#define TX_LINK_CFG_REMOTE_MFB_LIFETIME	FIELD32(0x000000ff)
-#define TX_LINK_CFG_MFB_ENABLE		FIELD32(0x00000100)
-#define TX_LINK_CFG_REMOTE_UMFS_ENABLE	FIELD32(0x00000200)
-#define TX_LINK_CFG_TX_MRQ_EN		FIELD32(0x00000400)
-#define TX_LINK_CFG_TX_RDG_EN		FIELD32(0x00000800)
-#define TX_LINK_CFG_TX_CF_ACK_EN	FIELD32(0x00001000)
-#define TX_LINK_CFG_REMOTE_MFB		FIELD32(0x00ff0000)
-#define TX_LINK_CFG_REMOTE_MFS		FIELD32(0xff000000)
-
-/*
- * HT_FBK_CFG0:
- */
-#define HT_FBK_CFG0			0x1354
-#define HT_FBK_CFG0_HTMCS0FBK		FIELD32(0x0000000f)
-#define HT_FBK_CFG0_HTMCS1FBK		FIELD32(0x000000f0)
-#define HT_FBK_CFG0_HTMCS2FBK		FIELD32(0x00000f00)
-#define HT_FBK_CFG0_HTMCS3FBK		FIELD32(0x0000f000)
-#define HT_FBK_CFG0_HTMCS4FBK		FIELD32(0x000f0000)
-#define HT_FBK_CFG0_HTMCS5FBK		FIELD32(0x00f00000)
-#define HT_FBK_CFG0_HTMCS6FBK		FIELD32(0x0f000000)
-#define HT_FBK_CFG0_HTMCS7FBK		FIELD32(0xf0000000)
-
-/*
- * HT_FBK_CFG1:
- */
-#define HT_FBK_CFG1			0x1358
-#define HT_FBK_CFG1_HTMCS8FBK		FIELD32(0x0000000f)
-#define HT_FBK_CFG1_HTMCS9FBK		FIELD32(0x000000f0)
-#define HT_FBK_CFG1_HTMCS10FBK		FIELD32(0x00000f00)
-#define HT_FBK_CFG1_HTMCS11FBK		FIELD32(0x0000f000)
-#define HT_FBK_CFG1_HTMCS12FBK		FIELD32(0x000f0000)
-#define HT_FBK_CFG1_HTMCS13FBK		FIELD32(0x00f00000)
-#define HT_FBK_CFG1_HTMCS14FBK		FIELD32(0x0f000000)
-#define HT_FBK_CFG1_HTMCS15FBK		FIELD32(0xf0000000)
-
-/*
- * LG_FBK_CFG0:
- */
-#define LG_FBK_CFG0			0x135c
-#define LG_FBK_CFG0_OFDMMCS0FBK		FIELD32(0x0000000f)
-#define LG_FBK_CFG0_OFDMMCS1FBK		FIELD32(0x000000f0)
-#define LG_FBK_CFG0_OFDMMCS2FBK		FIELD32(0x00000f00)
-#define LG_FBK_CFG0_OFDMMCS3FBK		FIELD32(0x0000f000)
-#define LG_FBK_CFG0_OFDMMCS4FBK		FIELD32(0x000f0000)
-#define LG_FBK_CFG0_OFDMMCS5FBK		FIELD32(0x00f00000)
-#define LG_FBK_CFG0_OFDMMCS6FBK		FIELD32(0x0f000000)
-#define LG_FBK_CFG0_OFDMMCS7FBK		FIELD32(0xf0000000)
-
-/*
- * LG_FBK_CFG1:
- */
-#define LG_FBK_CFG1			0x1360
-#define LG_FBK_CFG0_CCKMCS0FBK		FIELD32(0x0000000f)
-#define LG_FBK_CFG0_CCKMCS1FBK		FIELD32(0x000000f0)
-#define LG_FBK_CFG0_CCKMCS2FBK		FIELD32(0x00000f00)
-#define LG_FBK_CFG0_CCKMCS3FBK		FIELD32(0x0000f000)
-
-/*
- * CCK_PROT_CFG: CCK Protection
- * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
- * PROTECT_CTRL: Protection control frame type for CCK TX
- *               0:none, 1:RTS/CTS, 2:CTS-to-self
- * PROTECT_NAV: TXOP protection type for CCK TX
- *              0:none, 1:ShortNAVprotect, 2:LongNAVProtect
- * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
- * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
- * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
- * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow
- * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow
- * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow
- * RTS_TH_EN: RTS threshold enable on CCK TX
- */
-#define CCK_PROT_CFG			0x1364
-#define CCK_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
-#define CCK_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
-#define CCK_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
-#define CCK_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
-#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
-#define CCK_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
-#define CCK_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
-#define CCK_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
-#define CCK_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
-#define CCK_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
-
-/*
- * OFDM_PROT_CFG: OFDM Protection
- */
-#define OFDM_PROT_CFG			0x1368
-#define OFDM_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
-#define OFDM_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
-#define OFDM_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
-#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
-#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
-#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
-#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
-#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
-#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
-#define OFDM_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
-
-/*
- * MM20_PROT_CFG: MM20 Protection
- */
-#define MM20_PROT_CFG			0x136c
-#define MM20_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
-#define MM20_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
-#define MM20_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
-#define MM20_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
-#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
-#define MM20_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
-#define MM20_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
-#define MM20_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
-#define MM20_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
-#define MM20_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
-
-/*
- * MM40_PROT_CFG: MM40 Protection
- */
-#define MM40_PROT_CFG			0x1370
-#define MM40_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
-#define MM40_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
-#define MM40_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
-#define MM40_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
-#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
-#define MM40_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
-#define MM40_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
-#define MM40_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
-#define MM40_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
-#define MM40_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
-
-/*
- * GF20_PROT_CFG: GF20 Protection
- */
-#define GF20_PROT_CFG			0x1374
-#define GF20_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
-#define GF20_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
-#define GF20_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
-#define GF20_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
-#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
-#define GF20_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
-#define GF20_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
-#define GF20_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
-#define GF20_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
-#define GF20_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
-
-/*
- * GF40_PROT_CFG: GF40 Protection
- */
-#define GF40_PROT_CFG			0x1378
-#define GF40_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
-#define GF40_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
-#define GF40_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
-#define GF40_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
-#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
-#define GF40_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
-#define GF40_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
-#define GF40_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
-#define GF40_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
-#define GF40_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
-
-/*
- * EXP_CTS_TIME:
- */
-#define EXP_CTS_TIME			0x137c
-
-/*
- * EXP_ACK_TIME:
- */
-#define EXP_ACK_TIME			0x1380
-
-/*
- * RX_FILTER_CFG: RX configuration register.
- */
-#define RX_FILTER_CFG			0x1400
-#define RX_FILTER_CFG_DROP_CRC_ERROR	FIELD32(0x00000001)
-#define RX_FILTER_CFG_DROP_PHY_ERROR	FIELD32(0x00000002)
-#define RX_FILTER_CFG_DROP_NOT_TO_ME	FIELD32(0x00000004)
-#define RX_FILTER_CFG_DROP_NOT_MY_BSSD	FIELD32(0x00000008)
-#define RX_FILTER_CFG_DROP_VER_ERROR	FIELD32(0x00000010)
-#define RX_FILTER_CFG_DROP_MULTICAST	FIELD32(0x00000020)
-#define RX_FILTER_CFG_DROP_BROADCAST	FIELD32(0x00000040)
-#define RX_FILTER_CFG_DROP_DUPLICATE	FIELD32(0x00000080)
-#define RX_FILTER_CFG_DROP_CF_END_ACK	FIELD32(0x00000100)
-#define RX_FILTER_CFG_DROP_CF_END	FIELD32(0x00000200)
-#define RX_FILTER_CFG_DROP_ACK		FIELD32(0x00000400)
-#define RX_FILTER_CFG_DROP_CTS		FIELD32(0x00000800)
-#define RX_FILTER_CFG_DROP_RTS		FIELD32(0x00001000)
-#define RX_FILTER_CFG_DROP_PSPOLL	FIELD32(0x00002000)
-#define RX_FILTER_CFG_DROP_BA		FIELD32(0x00004000)
-#define RX_FILTER_CFG_DROP_BAR		FIELD32(0x00008000)
-#define RX_FILTER_CFG_DROP_CNTL		FIELD32(0x00010000)
-
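
Each RX_FILTER_CFG bit above drops one class of received frames when set. As a rough illustration only (not the driver's actual filter-update path), a configuration routine would OR the relevant drop bits together:

#include <stdint.h>

/* Illustrative copies of a few RX_FILTER_CFG bits from above. */
#define DROP_CRC_ERROR	0x00000001u
#define DROP_PHY_ERROR	0x00000002u
#define DROP_NOT_TO_ME	0x00000004u

/* Build a filter word: always drop corrupted frames, optionally keep
 * frames addressed to other stations (promiscuous/monitor use). */
static uint32_t build_rx_filter(int promiscuous)
{
	uint32_t filter = DROP_CRC_ERROR | DROP_PHY_ERROR;

	if (!promiscuous)
		filter |= DROP_NOT_TO_ME;
	return filter;
}
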
-/*
- * AUTO_RSP_CFG:
- * AUTORESPONDER: 0: disable, 1: enable
- * BAC_ACK_POLICY: 0:long, 1:short preamble
- * CTS_40_MMODE: Response CTS 40MHz duplicate mode
- * CTS_40_MREF: Response CTS 40MHz duplicate mode
- * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
- * DUAL_CTS_EN: Power bit value in control frame
- * ACK_CTS_PSM_BIT: Power bit value in control frame
- */
-#define AUTO_RSP_CFG			0x1404
-#define AUTO_RSP_CFG_AUTORESPONDER	FIELD32(0x00000001)
-#define AUTO_RSP_CFG_BAC_ACK_POLICY	FIELD32(0x00000002)
-#define AUTO_RSP_CFG_CTS_40_MMODE	FIELD32(0x00000004)
-#define AUTO_RSP_CFG_CTS_40_MREF	FIELD32(0x00000008)
-#define AUTO_RSP_CFG_AR_PREAMBLE	FIELD32(0x00000010)
-#define AUTO_RSP_CFG_DUAL_CTS_EN	FIELD32(0x00000040)
-#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT	FIELD32(0x00000080)
-
-/*
- * LEGACY_BASIC_RATE:
- */
-#define LEGACY_BASIC_RATE		0x1408
-
-/*
- * HT_BASIC_RATE:
- */
-#define HT_BASIC_RATE			0x140c
-
-/*
- * HT_CTRL_CFG:
- */
-#define HT_CTRL_CFG			0x1410
-
-/*
- * SIFS_COST_CFG:
- */
-#define SIFS_COST_CFG			0x1414
-
-/*
- * RX_PARSER_CFG:
- * Set NAV for all received frames
- */
-#define RX_PARSER_CFG			0x1418
-
-/*
- * TX_SEC_CNT0:
- */
-#define TX_SEC_CNT0			0x1500
-
-/*
- * RX_SEC_CNT0:
- */
-#define RX_SEC_CNT0			0x1504
-
-/*
- * CCMP_FC_MUTE:
- */
-#define CCMP_FC_MUTE			0x1508
-
-/*
- * TXOP_HLDR_ADDR0:
- */
-#define TXOP_HLDR_ADDR0			0x1600
-
-/*
- * TXOP_HLDR_ADDR1:
- */
-#define TXOP_HLDR_ADDR1			0x1604
-
-/*
- * TXOP_HLDR_ET:
- */
-#define TXOP_HLDR_ET			0x1608
-
-/*
- * QOS_CFPOLL_RA_DW0:
- */
-#define QOS_CFPOLL_RA_DW0		0x160c
-
-/*
- * QOS_CFPOLL_RA_DW1:
- */
-#define QOS_CFPOLL_RA_DW1		0x1610
-
-/*
- * QOS_CFPOLL_QC:
- */
-#define QOS_CFPOLL_QC			0x1614
-
-/*
- * RX_STA_CNT0: RX PLCP error count & RX CRC error count
- */
-#define RX_STA_CNT0			0x1700
-#define RX_STA_CNT0_CRC_ERR		FIELD32(0x0000ffff)
-#define RX_STA_CNT0_PHY_ERR		FIELD32(0xffff0000)
-
-/*
- * RX_STA_CNT1: RX False CCA count & RX LONG frame count
- */
-#define RX_STA_CNT1			0x1704
-#define RX_STA_CNT1_FALSE_CCA		FIELD32(0x0000ffff)
-#define RX_STA_CNT1_PLCP_ERR		FIELD32(0xffff0000)
-
-/*
- * RX_STA_CNT2:
- */
-#define RX_STA_CNT2			0x1708
-#define RX_STA_CNT2_RX_DUPLI_COUNT	FIELD32(0x0000ffff)
-#define RX_STA_CNT2_RX_FIFO_OVERFLOW	FIELD32(0xffff0000)
-
-/*
- * TX_STA_CNT0: TX Beacon count
- */
-#define TX_STA_CNT0			0x170c
-#define TX_STA_CNT0_TX_FAIL_COUNT	FIELD32(0x0000ffff)
-#define TX_STA_CNT0_TX_BEACON_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_STA_CNT1: TX tx count
- */
-#define TX_STA_CNT1			0x1710
-#define TX_STA_CNT1_TX_SUCCESS		FIELD32(0x0000ffff)
-#define TX_STA_CNT1_TX_RETRANSMIT	FIELD32(0xffff0000)
-
-/*
- * TX_STA_CNT2: TX tx count
- */
-#define TX_STA_CNT2			0x1714
-#define TX_STA_CNT2_TX_ZERO_LEN_COUNT	FIELD32(0x0000ffff)
-#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_STA_FIFO: TX Result for specific PID status fifo register
- */
-#define TX_STA_FIFO			0x1718
-#define TX_STA_FIFO_VALID		FIELD32(0x00000001)
-#define TX_STA_FIFO_PID_TYPE		FIELD32(0x0000001e)
-#define TX_STA_FIFO_TX_SUCCESS		FIELD32(0x00000020)
-#define TX_STA_FIFO_TX_AGGRE		FIELD32(0x00000040)
-#define TX_STA_FIFO_TX_ACK_REQUIRED	FIELD32(0x00000080)
-#define TX_STA_FIFO_WCID		FIELD32(0x0000ff00)
-#define TX_STA_FIFO_SUCCESS_RATE	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT: Debug counter
- */
-#define TX_AGG_CNT			0x171c
-#define TX_AGG_CNT_NON_AGG_TX_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT_AGG_TX_COUNT		FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT0:
- */
-#define TX_AGG_CNT0			0x1720
-#define TX_AGG_CNT0_AGG_SIZE_1_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT0_AGG_SIZE_2_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT1:
- */
-#define TX_AGG_CNT1			0x1724
-#define TX_AGG_CNT1_AGG_SIZE_3_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT1_AGG_SIZE_4_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT2:
- */
-#define TX_AGG_CNT2			0x1728
-#define TX_AGG_CNT2_AGG_SIZE_5_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT2_AGG_SIZE_6_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT3:
- */
-#define TX_AGG_CNT3			0x172c
-#define TX_AGG_CNT3_AGG_SIZE_7_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT3_AGG_SIZE_8_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT4:
- */
-#define TX_AGG_CNT4			0x1730
-#define TX_AGG_CNT4_AGG_SIZE_9_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT4_AGG_SIZE_10_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT5:
- */
-#define TX_AGG_CNT5			0x1734
-#define TX_AGG_CNT5_AGG_SIZE_11_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT5_AGG_SIZE_12_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT6:
- */
-#define TX_AGG_CNT6			0x1738
-#define TX_AGG_CNT6_AGG_SIZE_13_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT6_AGG_SIZE_14_COUNT	FIELD32(0xffff0000)
-
-/*
- * TX_AGG_CNT7:
- */
-#define TX_AGG_CNT7			0x173c
-#define TX_AGG_CNT7_AGG_SIZE_15_COUNT	FIELD32(0x0000ffff)
-#define TX_AGG_CNT7_AGG_SIZE_16_COUNT	FIELD32(0xffff0000)
-
-/*
- * MPDU_DENSITY_CNT:
- * TX_ZERO_DEL: TX zero length delimiter count
- * RX_ZERO_DEL: RX zero length delimiter count
- */
-#define MPDU_DENSITY_CNT		0x1740
-#define MPDU_DENSITY_CNT_TX_ZERO_DEL	FIELD32(0x0000ffff)
-#define MPDU_DENSITY_CNT_RX_ZERO_DEL	FIELD32(0xffff0000)
-
-/*
- * Security key table memory.
- * MAC_WCID_BASE: 8 bytes (only 6 bytes used) * 256 entries
- * PAIRWISE_KEY_TABLE_BASE: 32 bytes * 256 entries
- * MAC_IVEIV_TABLE_BASE: 8 bytes * 256 entries
- * MAC_WCID_ATTRIBUTE_BASE: 4 bytes * 256 entries
- * SHARED_KEY_TABLE_BASE: 32 bytes * 32 entries
- * SHARED_KEY_MODE_BASE: 4 bits * 32 entries
- */
-#define MAC_WCID_BASE			0x1800
-#define PAIRWISE_KEY_TABLE_BASE		0x4000
-#define MAC_IVEIV_TABLE_BASE		0x6000
-#define MAC_WCID_ATTRIBUTE_BASE		0x6800
-#define SHARED_KEY_TABLE_BASE		0x6c00
-#define SHARED_KEY_MODE_BASE		0x7000
-
-#define MAC_WCID_ENTRY(__idx) \
-	( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
-#define PAIRWISE_KEY_ENTRY(__idx) \
-	( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
-#define MAC_IVEIV_ENTRY(__idx) \
-	( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
-#define MAC_WCID_ATTR_ENTRY(__idx) \
-	( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
-#define SHARED_KEY_ENTRY(__idx) \
-	( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
-#define SHARED_KEY_MODE_ENTRY(__idx) \
-	( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
-
-struct mac_wcid_entry {
-	u8 mac[6];
-	u8 reserved[2];
-} __attribute__ ((packed));
-
-struct hw_key_entry {
-	u8 key[16];
-	u8 tx_mic[8];
-	u8 rx_mic[8];
-} __attribute__ ((packed));
-
-struct mac_iveiv_entry {
-	u8 iv[8];
-} __attribute__ ((packed));
-
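
Given the packed structures above, the *_ENTRY() macros reduce to simple offset arithmetic into the on-chip tables. A small stand-alone check of that arithmetic (the struct mirrors hw_key_entry above; everything else about the driver context is assumed):

#include <stdio.h>

#define PAIRWISE_KEY_TABLE_BASE	0x4000
#define SHARED_KEY_TABLE_BASE	0x6c00

struct hw_key_entry {			/* 16 + 8 + 8 = 32 bytes per entry */
	unsigned char key[16];
	unsigned char tx_mic[8];
	unsigned char rx_mic[8];
};

int main(void)
{
	/* Entry 2 of the pairwise key table lies 2 * 32 bytes past its base. */
	printf("pairwise[2] @ 0x%zx\n",
	       PAIRWISE_KEY_TABLE_BASE + 2 * sizeof(struct hw_key_entry));
	/* Shared keys are indexed the same way within their own table. */
	printf("shared[5]   @ 0x%zx\n",
	       SHARED_KEY_TABLE_BASE + 5 * sizeof(struct hw_key_entry));
	return 0;
}
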
-/*
- * MAC_WCID_ATTRIBUTE:
- */
-#define MAC_WCID_ATTRIBUTE_KEYTAB	FIELD32(0x00000001)
-#define MAC_WCID_ATTRIBUTE_CIPHER	FIELD32(0x0000000e)
-#define MAC_WCID_ATTRIBUTE_BSS_IDX	FIELD32(0x00000070)
-#define MAC_WCID_ATTRIBUTE_RX_WIUDF	FIELD32(0x00000380)
-
-/*
- * SHARED_KEY_MODE:
- */
-#define SHARED_KEY_MODE_BSS0_KEY0	FIELD32(0x00000007)
-#define SHARED_KEY_MODE_BSS0_KEY1	FIELD32(0x00000070)
-#define SHARED_KEY_MODE_BSS0_KEY2	FIELD32(0x00000700)
-#define SHARED_KEY_MODE_BSS0_KEY3	FIELD32(0x00007000)
-#define SHARED_KEY_MODE_BSS1_KEY0	FIELD32(0x00070000)
-#define SHARED_KEY_MODE_BSS1_KEY1	FIELD32(0x00700000)
-#define SHARED_KEY_MODE_BSS1_KEY2	FIELD32(0x07000000)
-#define SHARED_KEY_MODE_BSS1_KEY3	FIELD32(0x70000000)
-
-/*
- * HOST-MCU communication
- */
-
-/*
- * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
- */
-#define H2M_MAILBOX_CSR			0x7010
-#define H2M_MAILBOX_CSR_ARG0		FIELD32(0x000000ff)
-#define H2M_MAILBOX_CSR_ARG1		FIELD32(0x0000ff00)
-#define H2M_MAILBOX_CSR_CMD_TOKEN	FIELD32(0x00ff0000)
-#define H2M_MAILBOX_CSR_OWNER		FIELD32(0xff000000)
-
-/*
- * H2M_MAILBOX_CID:
- */
-#define H2M_MAILBOX_CID			0x7014
-#define H2M_MAILBOX_CID_CMD0		FIELD32(0x000000ff)
-#define H2M_MAILBOX_CID_CMD1		FIELD32(0x0000ff00)
-#define H2M_MAILBOX_CID_CMD2		FIELD32(0x00ff0000)
-#define H2M_MAILBOX_CID_CMD3		FIELD32(0xff000000)
-
-/*
- * H2M_MAILBOX_STATUS:
- */
-#define H2M_MAILBOX_STATUS		0x701c
-
-/*
- * H2M_INT_SRC:
- */
-#define H2M_INT_SRC			0x7024
-
-/*
- * H2M_BBP_AGENT:
- */
-#define H2M_BBP_AGENT			0x7028
-
-/*
- * MCU_LEDCS: LED control for MCU Mailbox.
- */
-#define MCU_LEDCS_LED_MODE		FIELD8(0x1f)
-#define MCU_LEDCS_POLARITY		FIELD8(0x01)
-
-/*
- * HW_CS_CTS_BASE:
- * Carrier-sense CTS frame base address.
- * It's where the MAC stores the carrier-sense frame for the carrier-sense function.
- */
-#define HW_CS_CTS_BASE			0x7700
-
-/*
- * HW_DFS_CTS_BASE:
- * DFS CTS frame base address. It's where the MAC stores the CTS frame for DFS.
- */
-#define HW_DFS_CTS_BASE			0x7780
-
-/*
- * TXRX control registers - base address 0x3000
- */
-
-/*
- * TXRX_CSR1:
- * rt2860b  UNKNOWN reg use R/O Reg Addr 0x77d0 first..
- */
-#define TXRX_CSR1			0x77d0
-
-/*
- * HW_DEBUG_SETTING_BASE:
- * Since a NULL frame won't be that long (256 bytes),
- * we steal 16 tail bytes to store debugging settings.
- */
-#define HW_DEBUG_SETTING_BASE		0x77f0
-#define HW_DEBUG_SETTING_BASE2		0x7770
-
-/*
- * HW_BEACON_BASE
- * In order to support a maximum of 8 MBSS, each with a beacon of
- * up to 512 bytes, three discontiguous memory segments are used:
- * 1. The original region for BCN 0~3
- * 2. Memory taken from the FCE table for BCN 4~5
- * 3. Memory taken from the pairwise key table for BCN 6~7
- *    This occupies the memory of wcid 238~253 for BCN 6
- *    and wcid 222~237 for BCN 7
- *
- * IMPORTANT NOTE: Not sure why legacy driver does this,
- * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
- */
-#define HW_BEACON_BASE0			0x7800
-#define HW_BEACON_BASE1			0x7a00
-#define HW_BEACON_BASE2			0x7c00
-#define HW_BEACON_BASE3			0x7e00
-#define HW_BEACON_BASE4			0x7200
-#define HW_BEACON_BASE5			0x7400
-#define HW_BEACON_BASE6			0x5dc0
-#define HW_BEACON_BASE7			0x5bc0
-
-#define HW_BEACON_OFFSET(__index) \
-	( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \
-	  (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \
-	  (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) )
-
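
The HW_BEACON_OFFSET() macro above hides the three-segment layout: indices 0-3 walk up from BASE0, 4-5 walk up from BASE4, and 6-7 walk down from BASE6. A stand-alone sketch of the same selection logic, useful for sanity-checking the resulting offsets:

#include <stdio.h>

#define HW_BEACON_BASE0	0x7800
#define HW_BEACON_BASE4	0x7200
#define HW_BEACON_BASE6	0x5dc0

/* Same index-to-offset mapping as HW_BEACON_OFFSET() above. */
static unsigned int beacon_offset(unsigned int idx)
{
	if (idx < 4)
		return HW_BEACON_BASE0 + idx * 0x0200;
	if (idx < 6)
		return HW_BEACON_BASE4 + (idx - 4) * 0x0200;
	return HW_BEACON_BASE6 - (idx - 6) * 0x0200;	/* BCN 6/7 grow downwards */
}

int main(void)
{
	for (unsigned int i = 0; i < 8; i++)
		printf("beacon %u -> 0x%04x\n", i, beacon_offset(i));
	return 0;
}
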
-/*
  * 8051 firmware image.
  */
 #define FIRMWARE_RT2870			"rt2870.bin"
 #define FIRMWARE_IMAGE_BASE		0x3000
 
 /*
- * BBP registers.
- * The wordsize of the BBP is 8 bits.
- */
-
-/*
- * BBP 1: TX Antenna
- */
-#define BBP1_TX_POWER			FIELD8(0x07)
-#define BBP1_TX_ANTENNA			FIELD8(0x18)
-
-/*
- * BBP 3: RX Antenna
- */
-#define BBP3_RX_ANTENNA			FIELD8(0x18)
-#define BBP3_HT40_PLUS			FIELD8(0x20)
-
-/*
- * BBP 4: Bandwidth
- */
-#define BBP4_TX_BF			FIELD8(0x01)
-#define BBP4_BANDWIDTH			FIELD8(0x18)
-
-/*
- * RFCSR registers
- * The wordsize of the RFCSR is 8 bits.
- */
-
-/*
- * RFCSR 6:
- */
-#define RFCSR6_R			FIELD8(0x03)
-
-/*
- * RFCSR 7:
- */
-#define RFCSR7_RF_TUNING		FIELD8(0x01)
-
-/*
- * RFCSR 12:
- */
-#define RFCSR12_TX_POWER		FIELD8(0x1f)
-
-/*
- * RFCSR 22:
- */
-#define RFCSR22_BASEBAND_LOOPBACK	FIELD8(0x01)
-
-/*
- * RFCSR 23:
- */
-#define RFCSR23_FREQ_OFFSET		FIELD8(0x7f)
-
-/*
- * RFCSR 30:
- */
-#define RFCSR30_RF_CALIBRATION		FIELD8(0x80)
-
-/*
- * RF registers
- */
-
-/*
- * RF 2
- */
-#define RF2_ANTENNA_RX2			FIELD32(0x00000040)
-#define RF2_ANTENNA_TX1			FIELD32(0x00004000)
-#define RF2_ANTENNA_RX1			FIELD32(0x00020000)
-
-/*
- * RF 3
- */
-#define RF3_TXPOWER_G			FIELD32(0x00003e00)
-#define RF3_TXPOWER_A_7DBM_BOOST	FIELD32(0x00000200)
-#define RF3_TXPOWER_A			FIELD32(0x00003c00)
-
-/*
- * RF 4
- */
-#define RF4_TXPOWER_G			FIELD32(0x000007c0)
-#define RF4_TXPOWER_A_7DBM_BOOST	FIELD32(0x00000040)
-#define RF4_TXPOWER_A			FIELD32(0x00000780)
-#define RF4_FREQ_OFFSET			FIELD32(0x001f8000)
-#define RF4_HT40			FIELD32(0x00200000)
-
-/*
- * EEPROM content.
- * The wordsize of the EEPROM is 16 bits.
- */
-
-/*
- * EEPROM Version
- */
-#define EEPROM_VERSION			0x0001
-#define EEPROM_VERSION_FAE		FIELD16(0x00ff)
-#define EEPROM_VERSION_VERSION		FIELD16(0xff00)
-
-/*
- * HW MAC address.
- */
-#define EEPROM_MAC_ADDR_0		0x0002
-#define EEPROM_MAC_ADDR_BYTE0		FIELD16(0x00ff)
-#define EEPROM_MAC_ADDR_BYTE1		FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_1		0x0003
-#define EEPROM_MAC_ADDR_BYTE2		FIELD16(0x00ff)
-#define EEPROM_MAC_ADDR_BYTE3		FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_2		0x0004
-#define EEPROM_MAC_ADDR_BYTE4		FIELD16(0x00ff)
-#define EEPROM_MAC_ADDR_BYTE5		FIELD16(0xff00)
-
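
The MAC address is spread across three 16-bit EEPROM words, low byte first. A hedged sketch of how the six bytes fall out of those words (plain C with made-up word values; the driver reads the words through its own EEPROM accessors):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical contents of EEPROM words 0x0002..0x0004. */
	uint16_t w[3] = { 0x2300, 0x6745, 0xab89 };
	uint8_t mac[6];

	for (int i = 0; i < 3; i++) {
		mac[2 * i]     = w[i] & 0x00ff;	/* EEPROM_MAC_ADDR_BYTE{0,2,4} */
		mac[2 * i + 1] = w[i] >> 8;	/* EEPROM_MAC_ADDR_BYTE{1,3,5} */
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
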
-/*
- * EEPROM ANTENNA config
- * RXPATH: 1: 1R, 2: 2R, 3: 3R
- * TXPATH: 1: 1T, 2: 2T
- */
-#define	EEPROM_ANTENNA			0x001a
-#define EEPROM_ANTENNA_RXPATH		FIELD16(0x000f)
-#define EEPROM_ANTENNA_TXPATH		FIELD16(0x00f0)
-#define EEPROM_ANTENNA_RF_TYPE		FIELD16(0x0f00)
-
-/*
- * EEPROM NIC config
- * CARDBUS_ACCEL: 0 - enable, 1 - disable
- */
-#define	EEPROM_NIC			0x001b
-#define EEPROM_NIC_HW_RADIO		FIELD16(0x0001)
-#define EEPROM_NIC_DYNAMIC_TX_AGC	FIELD16(0x0002)
-#define EEPROM_NIC_EXTERNAL_LNA_BG	FIELD16(0x0004)
-#define EEPROM_NIC_EXTERNAL_LNA_A	FIELD16(0x0008)
-#define EEPROM_NIC_CARDBUS_ACCEL	FIELD16(0x0010)
-#define EEPROM_NIC_BW40M_SB_BG		FIELD16(0x0020)
-#define EEPROM_NIC_BW40M_SB_A		FIELD16(0x0040)
-#define EEPROM_NIC_WPS_PBC		FIELD16(0x0080)
-#define EEPROM_NIC_BW40M_BG		FIELD16(0x0100)
-#define EEPROM_NIC_BW40M_A		FIELD16(0x0200)
-
-/*
- * EEPROM frequency
- */
-#define	EEPROM_FREQ			0x001d
-#define EEPROM_FREQ_OFFSET		FIELD16(0x00ff)
-#define EEPROM_FREQ_LED_MODE		FIELD16(0x7f00)
-#define EEPROM_FREQ_LED_POLARITY	FIELD16(0x1000)
-
-/*
- * EEPROM LED
- * POLARITY_RDY_G: Polarity RDY_G setting.
- * POLARITY_RDY_A: Polarity RDY_A setting.
- * POLARITY_ACT: Polarity ACT setting.
- * POLARITY_GPIO_0: Polarity GPIO0 setting.
- * POLARITY_GPIO_1: Polarity GPIO1 setting.
- * POLARITY_GPIO_2: Polarity GPIO2 setting.
- * POLARITY_GPIO_3: Polarity GPIO3 setting.
- * POLARITY_GPIO_4: Polarity GPIO4 setting.
- * LED_MODE: Led mode.
- */
-#define EEPROM_LED1			0x001e
-#define EEPROM_LED2			0x001f
-#define EEPROM_LED3			0x0020
-#define EEPROM_LED_POLARITY_RDY_BG	FIELD16(0x0001)
-#define EEPROM_LED_POLARITY_RDY_A	FIELD16(0x0002)
-#define EEPROM_LED_POLARITY_ACT		FIELD16(0x0004)
-#define EEPROM_LED_POLARITY_GPIO_0	FIELD16(0x0008)
-#define EEPROM_LED_POLARITY_GPIO_1	FIELD16(0x0010)
-#define EEPROM_LED_POLARITY_GPIO_2	FIELD16(0x0020)
-#define EEPROM_LED_POLARITY_GPIO_3	FIELD16(0x0040)
-#define EEPROM_LED_POLARITY_GPIO_4	FIELD16(0x0080)
-#define EEPROM_LED_LED_MODE		FIELD16(0x1f00)
-
-/*
- * EEPROM LNA
- */
-#define EEPROM_LNA			0x0022
-#define EEPROM_LNA_BG			FIELD16(0x00ff)
-#define EEPROM_LNA_A0			FIELD16(0xff00)
-
-/*
- * EEPROM RSSI BG offset
- */
-#define EEPROM_RSSI_BG			0x0023
-#define EEPROM_RSSI_BG_OFFSET0		FIELD16(0x00ff)
-#define EEPROM_RSSI_BG_OFFSET1		FIELD16(0xff00)
-
-/*
- * EEPROM RSSI BG2 offset
- */
-#define EEPROM_RSSI_BG2			0x0024
-#define EEPROM_RSSI_BG2_OFFSET2		FIELD16(0x00ff)
-#define EEPROM_RSSI_BG2_LNA_A1		FIELD16(0xff00)
-
-/*
- * EEPROM RSSI A offset
- */
-#define EEPROM_RSSI_A			0x0025
-#define EEPROM_RSSI_A_OFFSET0		FIELD16(0x00ff)
-#define EEPROM_RSSI_A_OFFSET1		FIELD16(0xff00)
-
-/*
- * EEPROM RSSI A2 offset
- */
-#define EEPROM_RSSI_A2			0x0026
-#define EEPROM_RSSI_A2_OFFSET2		FIELD16(0x00ff)
-#define EEPROM_RSSI_A2_LNA_A2		FIELD16(0xff00)
-
-/*
- * EEPROM TXpower delta: 20MHz and 40MHz use different power.
- *	This is the delta for 40MHz.
- * VALUE: TX power delta value (MAX=4)
- * TYPE: 1: plus the delta value, 0: minus the delta value
- * TXPOWER: Enable
- */
-#define EEPROM_TXPOWER_DELTA		0x0028
-#define EEPROM_TXPOWER_DELTA_VALUE	FIELD16(0x003f)
-#define EEPROM_TXPOWER_DELTA_TYPE	FIELD16(0x0040)
-#define EEPROM_TXPOWER_DELTA_TXPOWER	FIELD16(0x0080)
-
-/*
- * EEPROM TXPOWER 802.11BG
- */
-#define	EEPROM_TXPOWER_BG1		0x0029
-#define	EEPROM_TXPOWER_BG2		0x0030
-#define EEPROM_TXPOWER_BG_SIZE		7
-#define EEPROM_TXPOWER_BG_1		FIELD16(0x00ff)
-#define EEPROM_TXPOWER_BG_2		FIELD16(0xff00)
-
-/*
- * EEPROM TXPOWER 802.11A
- */
-#define EEPROM_TXPOWER_A1		0x003c
-#define EEPROM_TXPOWER_A2		0x0053
-#define EEPROM_TXPOWER_A_SIZE		6
-#define EEPROM_TXPOWER_A_1		FIELD16(0x00ff)
-#define EEPROM_TXPOWER_A_2		FIELD16(0xff00)
-
-/*
- * EEPROM TXpower byrate: 20MHZ power
- */
-#define EEPROM_TXPOWER_BYRATE		0x006f
-
-/*
- * EEPROM BBP.
- */
-#define	EEPROM_BBP_START		0x0078
-#define EEPROM_BBP_SIZE			16
-#define EEPROM_BBP_VALUE		FIELD16(0x00ff)
-#define EEPROM_BBP_REG_ID		FIELD16(0xff00)
-
-/*
- * MCU mailbox commands.
- */
-#define MCU_SLEEP			0x30
-#define MCU_WAKEUP			0x31
-#define MCU_RADIO_OFF			0x35
-#define MCU_CURRENT			0x36
-#define MCU_LED				0x50
-#define MCU_LED_STRENGTH		0x51
-#define MCU_LED_1			0x52
-#define MCU_LED_2			0x53
-#define MCU_LED_3			0x54
-#define MCU_RADAR			0x60
-#define MCU_BOOT_SIGNAL			0x72
-#define MCU_BBP_SIGNAL			0x80
-#define MCU_POWER_SAVE			0x83
-
-/*
- * MCU mailbox tokens
- */
-#define TOKEN_WAKUP			3
-
-/*
  * DMA descriptor defines.
  */
-#define TXD_DESC_SIZE			( 4 * sizeof(__le32) )
 #define TXINFO_DESC_SIZE		( 1 * sizeof(__le32) )
-#define TXWI_DESC_SIZE			( 4 * sizeof(__le32) )
-#define RXD_DESC_SIZE			( 1 * sizeof(__le32) )
-#define RXWI_DESC_SIZE			( 4 * sizeof(__le32) )
-
-/*
- * TX descriptor format for TX, PRIO and Beacon Ring.
- */
-
-/*
- * Word0
- */
-#define TXD_W0_SD_PTR0			FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define TXD_W1_SD_LEN1			FIELD32(0x00003fff)
-#define TXD_W1_LAST_SEC1		FIELD32(0x00004000)
-#define TXD_W1_BURST			FIELD32(0x00008000)
-#define TXD_W1_SD_LEN0			FIELD32(0x3fff0000)
-#define TXD_W1_LAST_SEC0		FIELD32(0x40000000)
-#define TXD_W1_DMA_DONE			FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define TXD_W2_SD_PTR1			FIELD32(0xffffffff)
-
-/*
- * Word3
- * WIV: Wireless Info Valid. 1: Driver filled WI,  0: DMA needs to copy WI
- * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
- *       0:MGMT, 1:HCCA 2:EDCA
- */
-#define TXD_W3_WIV			FIELD32(0x01000000)
-#define TXD_W3_QSEL			FIELD32(0x06000000)
-#define TXD_W3_TCO			FIELD32(0x20000000)
-#define TXD_W3_UCO			FIELD32(0x40000000)
-#define TXD_W3_ICO			FIELD32(0x80000000)
+#define RXINFO_DESC_SIZE		( 1 * sizeof(__le32) )
 
 /*
  * TX Info structure
@@ -1807,52 +101,6 @@ struct mac_iveiv_entry {
 #define TXINFO_W0_USB_DMA_TX_BURST	FIELD32(0x80000000)
 
 /*
- * TX WI structure
- */
-
-/*
- * Word0
- * FRAG: 1 To inform TKIP engine this is a fragment.
- * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
- * TX_OP: 0: HT TXOP rule, 1: PIFS TX, 2: Backoff, 3: SIFS
- * BW: Channel bandwidth 20MHz or 40 MHz
- * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
- */
-#define TXWI_W0_FRAG			FIELD32(0x00000001)
-#define TXWI_W0_MIMO_PS			FIELD32(0x00000002)
-#define TXWI_W0_CF_ACK			FIELD32(0x00000004)
-#define TXWI_W0_TS			FIELD32(0x00000008)
-#define TXWI_W0_AMPDU			FIELD32(0x00000010)
-#define TXWI_W0_MPDU_DENSITY		FIELD32(0x000000e0)
-#define TXWI_W0_TX_OP			FIELD32(0x00000300)
-#define TXWI_W0_MCS			FIELD32(0x007f0000)
-#define TXWI_W0_BW			FIELD32(0x00800000)
-#define TXWI_W0_SHORT_GI		FIELD32(0x01000000)
-#define TXWI_W0_STBC			FIELD32(0x06000000)
-#define TXWI_W0_IFS			FIELD32(0x08000000)
-#define TXWI_W0_PHYMODE			FIELD32(0xc0000000)
-
-/*
- * Word1
- */
-#define TXWI_W1_ACK			FIELD32(0x00000001)
-#define TXWI_W1_NSEQ			FIELD32(0x00000002)
-#define TXWI_W1_BW_WIN_SIZE		FIELD32(0x000000fc)
-#define TXWI_W1_WIRELESS_CLI_ID		FIELD32(0x0000ff00)
-#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT	FIELD32(0x0fff0000)
-#define TXWI_W1_PACKETID		FIELD32(0xf0000000)
-
-/*
- * Word2
- */
-#define TXWI_W2_IV			FIELD32(0xffffffff)
-
-/*
- * Word3
- */
-#define TXWI_W3_EIV			FIELD32(0xffffffff)
-
-/*
  * RX descriptor format for RX Ring.
  */
 
@@ -1867,85 +115,25 @@ struct mac_iveiv_entry {
  * AMSDU: rx with 802.3 header, not 802.11 header.
  */
 
-#define RXD_W0_BA			FIELD32(0x00000001)
-#define RXD_W0_DATA			FIELD32(0x00000002)
-#define RXD_W0_NULLDATA			FIELD32(0x00000004)
-#define RXD_W0_FRAG			FIELD32(0x00000008)
-#define RXD_W0_UNICAST_TO_ME		FIELD32(0x00000010)
-#define RXD_W0_MULTICAST		FIELD32(0x00000020)
-#define RXD_W0_BROADCAST		FIELD32(0x00000040)
-#define RXD_W0_MY_BSS			FIELD32(0x00000080)
-#define RXD_W0_CRC_ERROR		FIELD32(0x00000100)
-#define RXD_W0_CIPHER_ERROR		FIELD32(0x00000600)
-#define RXD_W0_AMSDU			FIELD32(0x00000800)
-#define RXD_W0_HTC			FIELD32(0x00001000)
-#define RXD_W0_RSSI			FIELD32(0x00002000)
-#define RXD_W0_L2PAD			FIELD32(0x00004000)
-#define RXD_W0_AMPDU			FIELD32(0x00008000)
-#define RXD_W0_DECRYPTED		FIELD32(0x00010000)
-#define RXD_W0_PLCP_RSSI		FIELD32(0x00020000)
-#define RXD_W0_CIPHER_ALG		FIELD32(0x00040000)
-#define RXD_W0_LAST_AMSDU		FIELD32(0x00080000)
-#define RXD_W0_PLCP_SIGNAL		FIELD32(0xfff00000)
-
-/*
- * RX WI structure
- */
-
-/*
- * Word0
- */
-#define RXWI_W0_WIRELESS_CLI_ID		FIELD32(0x000000ff)
-#define RXWI_W0_KEY_INDEX		FIELD32(0x00000300)
-#define RXWI_W0_BSSID			FIELD32(0x00001c00)
-#define RXWI_W0_UDF			FIELD32(0x0000e000)
-#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT	FIELD32(0x0fff0000)
-#define RXWI_W0_TID			FIELD32(0xf0000000)
-
-/*
- * Word1
- */
-#define RXWI_W1_FRAG			FIELD32(0x0000000f)
-#define RXWI_W1_SEQUENCE		FIELD32(0x0000fff0)
-#define RXWI_W1_MCS			FIELD32(0x007f0000)
-#define RXWI_W1_BW			FIELD32(0x00800000)
-#define RXWI_W1_SHORT_GI		FIELD32(0x01000000)
-#define RXWI_W1_STBC			FIELD32(0x06000000)
-#define RXWI_W1_PHYMODE			FIELD32(0xc0000000)
-
-/*
- * Word2
- */
-#define RXWI_W2_RSSI0			FIELD32(0x000000ff)
-#define RXWI_W2_RSSI1			FIELD32(0x0000ff00)
-#define RXWI_W2_RSSI2			FIELD32(0x00ff0000)
-
-/*
- * Word3
- */
-#define RXWI_W3_SNR0			FIELD32(0x000000ff)
-#define RXWI_W3_SNR1			FIELD32(0x0000ff00)
-
-/*
- * Macros for converting txpower from EEPROM to mac80211 value
- * and from mac80211 value to register value.
- */
-#define MIN_G_TXPOWER	0
-#define MIN_A_TXPOWER	-7
-#define MAX_G_TXPOWER	31
-#define MAX_A_TXPOWER	15
-#define DEFAULT_TXPOWER	5
-
-#define TXPOWER_G_FROM_DEV(__txpower) \
-	((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
-
-#define TXPOWER_G_TO_DEV(__txpower) \
-	clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
-
-#define TXPOWER_A_FROM_DEV(__txpower) \
-	((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
-
-#define TXPOWER_A_TO_DEV(__txpower) \
-	clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
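
The TXPOWER_*_TO_DEV macros above clamp the mac80211 txpower into the range the RF registers accept, while the *_FROM_DEV direction substitutes a default for out-of-range values read from the device. A stand-alone sketch of the same behaviour (clamp_t is kernel-only, so the clamp is spelled out here):

/* Mirror of the G-band limits defined above. */
#define MIN_G_TXPOWER	0
#define MAX_G_TXPOWER	31
#define DEFAULT_TXPOWER	5

static int txpower_g_to_dev(int txpower)
{
	if (txpower < MIN_G_TXPOWER)
		return MIN_G_TXPOWER;
	if (txpower > MAX_G_TXPOWER)
		return MAX_G_TXPOWER;
	return txpower;
}

static int txpower_g_from_dev(int txpower)
{
	/* Out-of-range values read back from the device get a sane default. */
	return (txpower > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : txpower;
}
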
+#define RXINFO_W0_BA			FIELD32(0x00000001)
+#define RXINFO_W0_DATA			FIELD32(0x00000002)
+#define RXINFO_W0_NULLDATA		FIELD32(0x00000004)
+#define RXINFO_W0_FRAG			FIELD32(0x00000008)
+#define RXINFO_W0_UNICAST_TO_ME		FIELD32(0x00000010)
+#define RXINFO_W0_MULTICAST		FIELD32(0x00000020)
+#define RXINFO_W0_BROADCAST		FIELD32(0x00000040)
+#define RXINFO_W0_MY_BSS		FIELD32(0x00000080)
+#define RXINFO_W0_CRC_ERROR		FIELD32(0x00000100)
+#define RXINFO_W0_CIPHER_ERROR		FIELD32(0x00000600)
+#define RXINFO_W0_AMSDU			FIELD32(0x00000800)
+#define RXINFO_W0_HTC			FIELD32(0x00001000)
+#define RXINFO_W0_RSSI			FIELD32(0x00002000)
+#define RXINFO_W0_L2PAD			FIELD32(0x00004000)
+#define RXINFO_W0_AMPDU			FIELD32(0x00008000)
+#define RXINFO_W0_DECRYPTED		FIELD32(0x00010000)
+#define RXINFO_W0_PLCP_RSSI		FIELD32(0x00020000)
+#define RXINFO_W0_CIPHER_ALG		FIELD32(0x00040000)
+#define RXINFO_W0_LAST_AMSDU		FIELD32(0x00080000)
+#define RXINFO_W0_PLCP_SIGNAL		FIELD32(0xfff00000)
 
 #endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 27bc6b7fbfde..4d841c07c970 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1,5 +1,6 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -144,6 +145,11 @@ struct avg_val {
 	int avg_weight;
 };
 
+enum rt2x00_chip_intf {
+	RT2X00_CHIP_INTF_PCI,
+	RT2X00_CHIP_INTF_USB,
+};
+
 /*
  * Chipset identification
  * The chipset on the device is composed of a RT and RF chip.
@@ -158,10 +164,20 @@ struct rt2x00_chip {
 #define RT2561		0x0302
 #define RT2661		0x0401
 #define RT2571		0x1300
+#define RT2860		0x0601	/* 2.4GHz PCI/CB */
+#define RT2860D		0x0681	/* 2.4GHz, 5GHz PCI/CB */
+#define RT2890		0x0701	/* 2.4GHz PCIe */
+#define RT2890D		0x0781	/* 2.4GHz, 5GHz PCIe */
+#define RT2880		0x2880	/* WSOC */
+#define RT3052		0x3052	/* WSOC */
+#define RT3090		0x3090	/* 2.4GHz PCIe */
 #define RT2870		0x1600
+#define RT3070		0x1800
 
 	u16 rf;
 	u32 rev;
+
+	enum rt2x00_chip_intf intf;
 };
 
 /*
@@ -299,13 +315,6 @@ struct link {
 	struct avg_val avg_rssi;
 
 	/*
-	 * Currently precalculated percentages of successful
-	 * TX and RX frames.
-	 */
-	int rx_percentage;
-	int tx_percentage;
-
-	/*
 	 * Work structure for scheduling periodic link tuning.
 	 */
 	struct delayed_work work;
@@ -579,6 +588,7 @@ struct rt2x00_ops {
 	const unsigned int eeprom_size;
 	const unsigned int rf_size;
 	const unsigned int tx_queues;
+	const unsigned int extra_tx_headroom;
 	const struct data_queue_desc *rx;
 	const struct data_queue_desc *tx;
 	const struct data_queue_desc *bcn;
@@ -835,9 +845,23 @@ struct rt2x00_dev {
 	 * Firmware image.
 	 */
 	const struct firmware *fw;
+
+	/*
+	 * Driver specific data.
+	 */
+	void *priv;
 };
 
 /*
+ * Register defines.
+ * Some registers require multiple attempts before success,
+ * in those cases REGISTER_BUSY_COUNT attempts should be
+ * taken with a REGISTER_BUSY_DELAY interval.
+ */
+#define REGISTER_BUSY_COUNT	5
+#define REGISTER_BUSY_DELAY	100
+
+/*
  * Generic RF access.
  * The RF is being accessed by word index.
  */
@@ -883,10 +907,6 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
 static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
 				   const u16 rt, const u16 rf, const u32 rev)
 {
-	INFO(rt2x00dev,
-	     "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n",
-	     rt, rf, rev);
-
 	rt2x00dev->chip.rt = rt;
 	rt2x00dev->chip.rf = rf;
 	rt2x00dev->chip.rev = rev;
@@ -904,6 +924,13 @@ static inline void rt2x00_set_chip_rf(struct rt2x00_dev *rt2x00dev,
 	rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev);
 }
 
+static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
+{
+	INFO(rt2x00dev,
+	     "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n",
+	     rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
+}
+
 static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
 {
 	return (chipset->rt == chip);
@@ -925,6 +952,28 @@ static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
 	return ((chipset->rev & mask) == rev);
 }
 
+static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
+					enum rt2x00_chip_intf intf)
+{
+	rt2x00dev->chip.intf = intf;
+}
+
+static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
+			       enum rt2x00_chip_intf intf)
+{
+	return (chipset->intf == intf);
+}
+
+static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
+}
+
+static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
+}
+
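
The new intf field lets code shared between buses branch on the bus type without #ifdefs. A short hypothetical usage (the function and the headroom values are made up for illustration; only the rt2x00_intf_is_* helpers come from this patch):

/* Hypothetical: pick a bus-dependent value somewhere in shared code. */
static unsigned int example_rx_headroom(struct rt2x00_dev *rt2x00dev)
{
	if (rt2x00_intf_is_usb(rt2x00dev))
		return 4;	/* e.g. room for a USB-specific RX info word */
	return 0;		/* PCI, and SoC devices which mimic PCI */
}
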
 /**
  * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 40a201e2e151..098315a271ca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index de36837dcf86..d291c7862e10 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 68bc9bb1dbf9..7d323a763b54 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index 035cbc98c593..fa11409cb5c6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 73bbec58341e..06c43ca39bf8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -205,6 +205,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
 	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
 	unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
 	u8 rate_idx, rate_flags, retry_rates;
+	u8 skbdesc_flags = skbdesc->flags;
 	unsigned int i;
 	bool success;
 
@@ -287,12 +288,12 @@ void rt2x00lib_txdone(struct queue_entry *entry,
 	}
 
 	/*
-	 * Only send the status report to mac80211 when TX status was
-	 * requested by it. If this was a extra frame coming through
-	 * a mac80211 library call (RTS/CTS) then we should not send the
-	 * status report back.
+	 * Only send the status report to mac80211 when it's a frame
+	 * that originated in mac80211. If this was an extra frame coming
+	 * through a mac80211 library call (RTS/CTS) then we should not
+	 * send the status report back.
 	 */
-	if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
+	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
 		ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
 	else
 		dev_kfree_skb_irq(entry->skb);
@@ -430,7 +431,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
 
 	rx_status->mactime = rxdesc.timestamp;
 	rx_status->rate_idx = rate_idx;
-	rx_status->qual = rt2x00link_calculate_signal(rt2x00dev, rxdesc.rssi);
 	rx_status->signal = rxdesc.rssi;
 	rx_status->noise = rxdesc.noise;
 	rx_status->flag = rxdesc.flags;
@@ -684,6 +684,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
 	rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues;
 
 	/*
+	 * Initialize extra TX headroom required.
+	 */
+	rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
+
+	/*
 	 * Register HW.
 	 */
 	status = ieee80211_register_hw(rt2x00dev->hw);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index fdedb5122928..727019a748e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index d2deea2f2679..34beb00c4347 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -1,5 +1,6 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index e3cec839e540..1056c92143a8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 49671fed91d7..ca585e34d00e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 1046977e6a12..3b46f0c3332a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -33,8 +33,6 @@ enum led_type {
 	LED_TYPE_QUALITY,
 };
 
-#ifdef CONFIG_RT2X00_LIB_LEDS
-
 struct rt2x00_led {
 	struct rt2x00_dev *rt2x00dev;
 	struct led_classdev led_dev;
@@ -45,6 +43,4 @@ struct rt2x00_led {
 #define LED_REGISTERED		( 1 << 1 )
 };
 
-#endif /* CONFIG_RT2X00_LIB_LEDS */
-
 #endif /* RT2X00LEDS_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 567f029a8cda..be2e37fb4071 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -1,5 +1,6 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -161,8 +162,10 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
  * rt2x00queue_write_tx_frame - Write TX frame to hardware
 * @queue: Queue over which the frame should be sent
  * @skb: The skb to send
+ * @local: frame is not from mac80211
  */
-int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb);
+int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+			       bool local);
 
 /**
  * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
@@ -223,19 +226,6 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
 			     struct rxdone_entry_desc *rxdesc);
 
 /**
- * rt2x00link_calculate_signal - Calculate signal quality
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- * @rssi: RX Frame RSSI
- *
- * Calculate the signal quality of a frame based on the rssi
- * measured during the receiving of the frame and the global
- * link quality statistics measured since the start of the
- * link tuning. The result is a value between 0 and 100 which
- * is an indication of the signal quality.
- */
-int rt2x00link_calculate_signal(struct rt2x00_dev *rt2x00dev, int rssi);
-
-/**
  * rt2x00link_start_tuner - Start periodic link tuner work
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  *
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index c708d0be9155..0efbf5a6c254 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -36,24 +36,6 @@
 #define DEFAULT_RSSI		-128
 
 /*
- * When no TX/RX percentage could be calculated due to lack of
- * frames on the air, we fall back to a percentage of 50%.
- * This will assure we will at least get some decent value
- * when the link tuner starts.
- * The value will be dropped and overwritten with the correct (measured)
- * value anyway during the first run of the link tuner.
- */
-#define DEFAULT_PERCENTAGE	50
-
-/*
- * Small helper macro for percentage calculation
- * This is a very simple macro with the only catch that it will
- * produce a default value in case no total value was provided.
- */
-#define PERCENTAGE(__value, __total) \
-	( (__total) ? (((__value) * 100) / (__total)) : (DEFAULT_PERCENTAGE) )
-
-/*
  * Helper struct and macro to work with moving/walking averages.
  * When adding a value to the average value the following calculation
  * is needed:
@@ -91,27 +73,6 @@
 	__new; \
 })
 
-/*
- * For calculating the Signal quality we have determined
- * the total number of success and failed RX and TX frames.
- * With the addition of the average RSSI value we can determine
- * the link quality using the following algorithm:
- *
- *         rssi_percentage = (avg_rssi * 100) / rssi_offset
- *         rx_percentage = (rx_success * 100) / rx_total
- *         tx_percentage = (tx_success * 100) / tx_total
- *         avg_signal = ((WEIGHT_RSSI * avg_rssi) +
- *                       (WEIGHT_TX * tx_percentage) +
- *                       (WEIGHT_RX * rx_percentage)) / 100
- *
- * This value should then be checked to not be greater than 100.
- * This means the values of WEIGHT_RSSI, WEIGHT_RX, WEIGHT_TX must
- * sum up to 100 as well.
- */
-#define WEIGHT_RSSI	20
-#define WEIGHT_RX	40
-#define WEIGHT_TX	40
-
 static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
 {
 	struct link_ant *ant = &rt2x00dev->link.ant;
@@ -304,46 +265,6 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
 	ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
 }
 
-static void rt2x00link_precalculate_signal(struct rt2x00_dev *rt2x00dev)
-{
-	struct link *link = &rt2x00dev->link;
-	struct link_qual *qual = &rt2x00dev->link.qual;
-
-	link->rx_percentage =
-	    PERCENTAGE(qual->rx_success, qual->rx_failed + qual->rx_success);
-	link->tx_percentage =
-	    PERCENTAGE(qual->tx_success, qual->tx_failed + qual->tx_success);
-}
-
-int rt2x00link_calculate_signal(struct rt2x00_dev *rt2x00dev, int rssi)
-{
-	struct link *link = &rt2x00dev->link;
-	int rssi_percentage = 0;
-	int signal;
-
-	/*
-	 * We need a positive value for the RSSI.
-	 */
-	if (rssi < 0)
-		rssi += rt2x00dev->rssi_offset;
-
-	/*
-	 * Calculate the different percentages,
-	 * which will be used for the signal.
-	 */
-	rssi_percentage = PERCENTAGE(rssi, rt2x00dev->rssi_offset);
-
-	/*
-	 * Add the individual percentages and use the WEIGHT
-	 * defines to calculate the current link signal.
-	 */
-	signal = ((WEIGHT_RSSI * rssi_percentage) +
-		  (WEIGHT_TX * link->tx_percentage) +
-		  (WEIGHT_RX * link->rx_percentage)) / 100;
-
-	return max_t(int, signal, 100);
-}
-
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
 {
 	struct link *link = &rt2x00dev->link;
@@ -357,9 +278,6 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
 	if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
 		return;
 
-	link->rx_percentage = DEFAULT_PERCENTAGE;
-	link->tx_percentage = DEFAULT_PERCENTAGE;
-
 	rt2x00link_reset_tuner(rt2x00dev, false);
 
 	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
@@ -448,12 +366,6 @@ static void rt2x00link_tuner(struct work_struct *work)
 		rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
 
 	/*
-	 * Precalculate a portion of the link signal which is
-	 * in based on the tx/rx success/failure counters.
-	 */
-	rt2x00link_precalculate_signal(rt2x00dev);
-
-	/*
 	 * Send a signal to the led to update the led signal strength.
 	 */
 	rt2x00leds_led_quality(rt2x00dev, qual->rssi);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 929b85f34f38..de549c244ed8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -66,7 +66,6 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
 	rts_info = IEEE80211_SKB_CB(skb);
 	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
 	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;
-	rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
 
 	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 		rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
@@ -91,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
 				  frag_skb->data, data_length, tx_info,
 				  (struct ieee80211_rts *)(skb->data));
 
-	retval = rt2x00queue_write_tx_frame(queue, skb);
+	retval = rt2x00queue_write_tx_frame(queue, skb, true);
 	if (retval) {
 		dev_kfree_skb_any(skb);
 		WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
@@ -104,10 +103,8 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
 	enum data_queue_qid qid = skb_get_queue_mapping(skb);
 	struct data_queue *queue;
-	u16 frame_control;
 
 	/*
 	 * Mac80211 might be calling this function while we are trying
@@ -142,7 +139,6 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	 * either RTS or CTS-to-self frame and handles everything
 	 * inside the hardware.
 	 */
-	frame_control = le16_to_cpu(ieee80211hdr->frame_control);
 	if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
 						IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
 	    !rt2x00dev->ops->hw->set_rts_threshold) {
@@ -153,7 +149,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 			goto exit_fail;
 	}
 
-	if (rt2x00queue_write_tx_frame(queue, skb))
+	if (rt2x00queue_write_tx_frame(queue, skb, false))
 		goto exit_fail;
 
 	if (rt2x00queue_threshold(queue))
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cdd5154bd4c0..0feb4d0e4668 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -310,6 +310,8 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	rt2x00dev->irq = pci_dev->irq;
 	rt2x00dev->name = pci_name(pci_dev);
 
+	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
+
 	/*
 	 * Determine RT chipset by reading PCI header.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 15a12487e04b..d4f9449ab0a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -35,15 +35,6 @@
 #define PCI_DEVICE_DATA(__ops)	.driver_data = (kernel_ulong_t)(__ops)
 
 /*
- * Register defines.
- * Some registers require multiple attempts before success,
- * in those cases REGISTER_BUSY_COUNT attempts should be
- * taken with a REGISTER_BUSY_DELAY interval.
- */
-#define REGISTER_BUSY_COUNT	5
-#define REGISTER_BUSY_DELAY	100
-
-/*
  * Register access.
  */
 static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
@@ -53,10 +44,9 @@ static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
 	*value = readl(rt2x00dev->csr.base + offset);
 }
 
-static inline void
-rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
-			     const unsigned int offset,
-			     void *value, const u16 length)
+static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
+						const unsigned int offset,
+						void *value, const u32 length)
 {
 	memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
 }
@@ -68,10 +58,10 @@ static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
 	writel(value, rt2x00dev->csr.base + offset);
 }
 
-static inline void
-rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
-			      const unsigned int offset,
-			      const void *value, const u16 length)
+static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+						 const unsigned int offset,
+						 const void *value,
+						 const u32 length)
 {
 	memcpy_toio(rt2x00dev->csr.base + offset, value, length);
 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 577029efe320..239afc7a9c0b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -1,5 +1,6 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -161,10 +162,10 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
 	skb_trim(skb, frame_length);
 }
 
-void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_lengt)
+void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
 {
 	unsigned int frame_length = skb->len;
-	unsigned int align = ALIGN_SIZE(skb, header_lengt);
+	unsigned int align = ALIGN_SIZE(skb, header_length);
 
 	if (!align)
 		return;
@@ -213,7 +214,7 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 		skb_push(skb, header_align);
 		memmove(skb->data, skb->data + header_align, header_length);
 		memmove(skb->data + header_length + l2pad,
-			skb->data + header_length + l2pad + header_align,
+			skb->data + header_length + l2pad + payload_align,
 			frame_length - header_length);
 		skbdesc->flags |= SKBDESC_L2_PADDED;
 	}
@@ -453,7 +454,8 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
 }
 
-int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
+int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+			       bool local)
 {
 	struct ieee80211_tx_info *tx_info;
 	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
@@ -494,6 +496,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
 	skbdesc->tx_rate_idx = rate_idx;
 	skbdesc->tx_rate_flags = rate_flags;
 
+	if (local)
+		skbdesc->flags |= SKBDESC_NOT_MAC80211;
+
 	/*
 	 * When hardware encryption is supported, and this frame
 	 * is to be encrypted, we should strip the IV/EIV data from
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index a5591fb2b191..70775e5ba1ac 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -94,12 +94,15 @@ enum data_queue_qid {
  *	mac80211 but was stripped for processing by the driver.
  * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
  *	the padded bytes are located between header and payload.
+ * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
+ *	don't try to pass it back.
  */
 enum skb_frame_desc_flags {
 	SKBDESC_DMA_MAPPED_RX = 1 << 0,
 	SKBDESC_DMA_MAPPED_TX = 1 << 1,
 	SKBDESC_IV_STRIPPED = 1 << 2,
-	SKBDESC_L2_PADDED = 1 << 3
+	SKBDESC_L2_PADDED = 1 << 3,
+	SKBDESC_NOT_MAC80211 = 1 << 4,
 };
 
 /**
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 983e52e127a7..603bfc0adaa3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
new file mode 100644
index 000000000000..19e684f8ffa1
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -0,0 +1,165 @@
+/*
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2004 - 2009 Felix Fietkau <nbd@openwrt.org>
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2x00soc
+	Abstract: rt2x00 generic soc device routines.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00soc.h"
+
+static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
+{
+	kfree(rt2x00dev->rf);
+	rt2x00dev->rf = NULL;
+
+	kfree(rt2x00dev->eeprom);
+	rt2x00dev->eeprom = NULL;
+}
+
+static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
+{
+	struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start);
+	if (!rt2x00dev->csr.base)
+		goto exit;
+
+	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
+	if (!rt2x00dev->eeprom)
+		goto exit;
+
+	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
+	if (!rt2x00dev->rf)
+		goto exit;
+
+	return 0;
+
+exit:
+	ERROR_PROBE("Failed to allocate registers.\n");
+	rt2x00soc_free_reg(rt2x00dev);
+
+	return -ENOMEM;
+}
+
+int rt2x00soc_probe(struct platform_device *pdev,
+		    const unsigned short chipset,
+		    const struct rt2x00_ops *ops)
+{
+	struct ieee80211_hw *hw;
+	struct rt2x00_dev *rt2x00dev;
+	int retval;
+
+	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
+	if (!hw) {
+		ERROR_PROBE("Failed to allocate hardware.\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, hw);
+
+	rt2x00dev = hw->priv;
+	rt2x00dev->dev = &pdev->dev;
+	rt2x00dev->ops = ops;
+	rt2x00dev->hw = hw;
+	rt2x00dev->irq = platform_get_irq(pdev, 0);
+	rt2x00dev->name = pdev->dev.driver->name;
+
+	/*
+	 * SoC devices mimic PCI behavior.
+	 */
+	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
+
+	rt2x00_set_chip_rt(rt2x00dev, chipset);
+
+	retval = rt2x00soc_alloc_reg(rt2x00dev);
+	if (retval)
+		goto exit_free_device;
+
+	retval = rt2x00lib_probe_dev(rt2x00dev);
+	if (retval)
+		goto exit_free_reg;
+
+	return 0;
+
+exit_free_reg:
+	rt2x00soc_free_reg(rt2x00dev);
+
+exit_free_device:
+	ieee80211_free_hw(hw);
+
+	return retval;
+}
+
+int rt2x00soc_remove(struct platform_device *pdev)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	/*
+	 * Free all allocated data.
+	 */
+	rt2x00lib_remove_dev(rt2x00dev);
+	rt2x00soc_free_reg(rt2x00dev);
+	ieee80211_free_hw(hw);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00soc_remove);
+
+#ifdef CONFIG_PM
+int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	return rt2x00lib_suspend(rt2x00dev, state);
+}
+EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
+
+int rt2x00soc_resume(struct platform_device *pdev)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	return rt2x00lib_resume(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2x00soc_resume);
+#endif /* CONFIG_PM */
+
+/*
+ * rt2x00soc module information.
+ */
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2x00 soc library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
new file mode 100644
index 000000000000..8a3416624af5
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -0,0 +1,52 @@
+/*
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2x00soc
+	Abstract: Data structures for the rt2x00soc module.
+ */
+
+#ifndef RT2X00SOC_H
+#define RT2X00SOC_H
+
+#define KSEG1ADDR(__ptr) __ptr
+
+#define __rt2x00soc_probe(__chipset, __ops) \
+static int __rt2x00soc_probe(struct platform_device *pdev) \
+{ \
+	return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
+}
+
+/*
+ * SoC driver handlers.
+ */
+int rt2x00soc_probe(struct platform_device *pdev,
+		    const unsigned short chipset,
+		    const struct rt2x00_ops *ops);
+int rt2x00soc_remove(struct platform_device *pdev);
+#ifdef CONFIG_PM
+int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
+int rt2x00soc_resume(struct platform_device *pdev);
+#else
+#define rt2x00soc_suspend	NULL
+#define rt2x00soc_resume	NULL
+#endif /* CONFIG_PM */
+
+#endif /* RT2X00SOC_H */
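The __rt2x00soc_probe() macro generates a platform-bus probe wrapper bound to one chipset. A hedged sketch of how a SoC chipset driver might wire it up together with the remove/suspend/resume handlers exported above (the chipset constant, ops symbol and driver name below are illustrative only and not part of this patch):

	/* expands to: static int __rt2x00soc_probe(struct platform_device *pdev) */
	__rt2x00soc_probe(RT2880, &rt2880soc_ops)

	static struct platform_driver rt2880soc_driver = {
		.driver		= {
			.name	= "rt2880-wmac",
			.owner	= THIS_MODULE,
		},
		.probe		= __rt2x00soc_probe,
		.remove		= rt2x00soc_remove,
		.suspend	= rt2x00soc_suspend,
		.resume		= rt2x00soc_resume,
	};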
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f02b48a90593..0a751e73aa0f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
 
 int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
 			   const unsigned int offset,
-			   struct rt2x00_field32 field,
+			   const struct rt2x00_field32 field,
 			   u32 *reg)
 {
 	unsigned int i;
@@ -653,6 +653,8 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
 	rt2x00dev->ops = ops;
 	rt2x00dev->hw = hw;
 
+	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
+
 	retval = rt2x00usb_alloc_reg(rt2x00dev);
 	if (retval)
 		goto exit_free_device;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index bd2d59c85f1b..3da6841b5d42 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -26,6 +26,8 @@
 #ifndef RT2X00USB_H
 #define RT2X00USB_H
 
+#include <linux/usb.h>
+
 #define to_usb_device_intf(d) \
 ({ \
 	struct usb_interface *intf = to_usb_interface(d); \
@@ -39,17 +41,11 @@
 #define USB_DEVICE_DATA(__ops)	.driver_info = (kernel_ulong_t)(__ops)
 
 /*
- * Register defines.
- * Some registers require multiple attempts before success,
- * in those cases REGISTER_BUSY_COUNT attempts should be
- * taken with a REGISTER_BUSY_DELAY interval.
  * For USB vendor requests we need to pass a timeout
  * time in ms, for this we use the REGISTER_TIMEOUT,
  * however when loading firmware a higher value is
  * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE.
  */
-#define REGISTER_BUSY_COUNT		5
-#define REGISTER_BUSY_DELAY		100
 #define REGISTER_TIMEOUT		500
 #define REGISTER_TIMEOUT_FIRMWARE	1000
 
@@ -232,7 +228,7 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
 }
 
 /**
- * rt2x00usb_regbusy_read - Read 32bit register word
+ * rt2x00usb_register_read - Read 32bit register word
  * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
  * @offset: Register offset
  * @value: Pointer to where register contents should be stored
@@ -340,12 +336,13 @@ static inline void rt2x00usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
  * through rt2x00usb_vendor_request_buff().
  */
 static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
-					       const unsigned int offset,
-					       void *value, const u32 length)
+						 const unsigned int offset,
+						 const void *value,
+						 const u32 length)
 {
 	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
 				      USB_VENDOR_REQUEST_OUT, offset,
-				      value, length,
+				      (void *)value, length,
 				      REGISTER_TIMEOUT32(length));
 }
 
@@ -364,7 +361,7 @@ static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
  */
 int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
 			   const unsigned int offset,
-			   struct rt2x00_field32 field,
+			   const struct rt2x00_field32 field,
 			   u32 *reg);
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b20e3eac9d67..687e17dc2e9f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
  * These indirect registers work with busy bits,
  * and we will try maximal REGISTER_BUSY_COUNT times to access
  * the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
+ * between each attempt. When the busy bit is still set at that time,
  * the access attempt is considered to have failed,
  * and we will print an error.
  */
@@ -386,7 +386,7 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
 		 * The driver does not support the IV/EIV generation
 		 * in hardware. However it doesn't support the IV/EIV
 		 * inside the ieee80211 frame either, but requires it
-		 * to be provided seperately for the descriptor.
+		 * to be provided separately for the descriptor.
 		 * rt2x00lib will cut the IV/EIV data out of all frames
 		 * given to us by mac80211, but we must tell mac80211
 		 * to generate the IV/EIV data.
@@ -397,7 +397,7 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * SEC_CSR0 contains only single-bit fields to indicate
 	 * a particular key is valid. Because using the FIELD32()
-	 * defines directly will cause a lot of overhead we use
+	 * defines directly will cause a lot of overhead, we use
 	 * a calculation to determine the correct bit directly.
 	 */
 	mask = 1 << key->hw_key_idx;
@@ -425,11 +425,11 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 		/*
 		 * rt2x00lib can't determine the correct free
 		 * key_idx for pairwise keys. We have 2 registers
-		 * with key valid bits. The goal is simple, read
-		 * the first register, if that is full move to
+		 * with key valid bits. The goal is simple: read
+		 * the first register. If that is full, move to
 		 * the next register.
-		 * When both registers are full, we drop the key,
-		 * otherwise we use the first invalid entry.
+		 * When both registers are full, we drop the key.
+		 * Otherwise, we use the first invalid entry.
 		 */
 		rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
 		if (reg && reg == ~0) {
@@ -464,8 +464,8 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 					      &addr_entry, sizeof(addr_entry));
 
 		/*
-		 * Enable pairwise lookup table for given BSS idx,
-		 * without this received frames will not be decrypted
+		 * Enable pairwise lookup table for given BSS idx.
+		 * Without this, received frames will not be decrypted
 		 * by the hardware.
 		 */
 		rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg);
@@ -487,7 +487,7 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
 	 * a particular key is valid. Because using the FIELD32()
-	 * defines directly will cause a lot of overhead we use
+	 * defines directly will cause a lot of overhead, we use
 	 * a calculation to determine the correct bit directly.
 	 */
 	if (key->hw_key_idx < 32) {
@@ -556,7 +556,7 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
 	if (flags & CONFIG_UPDATE_TYPE) {
 		/*
 		 * Clear current synchronisation setup.
-		 * For the Beacon base registers we only need to clear
+		 * For the Beacon base registers, we only need to clear
 		 * the first byte since that byte contains the VALID and OWNER
 		 * bits which (when set to 0) will invalidate the entire beacon.
 		 */
@@ -1168,8 +1168,8 @@ static int rt61pci_check_firmware(struct rt2x00_dev *rt2x00dev,
 		return FW_BAD_LENGTH;
 
 	/*
-	 * The last 2 bytes in the firmware array are the crc checksum itself,
-	 * this means that we should never pass those 2 bytes to the crc
+	 * The last 2 bytes in the firmware array are the crc checksum itself.
+	 * This means that we should never pass those 2 bytes to the crc
 	 * algorithm.
 	 */
 	fw_crc = (data[len - 2] << 8 | data[len - 1]);
@@ -1986,7 +1986,7 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
 
 		/*
 		 * Hardware has stripped IV/EIV data from 802.11 frame during
-		 * decryption. It has provided the data seperately but rt2x00lib
+		 * decryption. It has provided the data separately but rt2x00lib
 		 * should decide if it should be reinserted.
 		 */
 		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
@@ -2042,7 +2042,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
 	 * During each loop we will compare the freshly read
 	 * STA_CSR4 register value with the value read from
 	 * the previous loop. If the 2 values are equal then
-	 * we should stop processing because the chance it
+	 * we should stop processing because the chance is
 	 * quite big that the device has been unplugged and
 	 * we risk going into an endless loop.
 	 */
@@ -2300,6 +2300,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
 	rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
 	rt2x00_set_chip_rf(rt2x00dev, value, reg);
+	rt2x00_print_chip(rt2x00dev);
 
 	if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
 	    !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
@@ -2330,7 +2331,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
 		__set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags);
 
 	/*
-	 * Detect if this device has an hardware controlled radio.
+	 * Detect if this device has a hardware controlled radio.
 	 */
 	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
 		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
@@ -2355,7 +2356,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
 		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
 
 	/*
-	 * When working with a RF2529 chip without double antenna
+	 * When working with a RF2529 chip without double antenna,
 	 * the antenna settings should be gathered from the NIC
 	 * eeprom word.
 	 */
@@ -2545,7 +2546,6 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 	    IEEE80211_HW_SIGNAL_DBM |
 	    IEEE80211_HW_SUPPORTS_PS |
 	    IEEE80211_HW_PS_NULLFUNC_STACK;
-	rt2x00dev->hw->extra_tx_headroom = 0;
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -2668,7 +2668,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
 
 	/*
 	 * We only need to perform additional register initialization
-	 * for WMM queues/
+	 * for WMM queues.
 	 */
 	if (queue_idx >= 4)
 		return 0;
@@ -2787,19 +2787,20 @@ static const struct data_queue_desc rt61pci_queue_bcn = {
 };
 
 static const struct rt2x00_ops rt61pci_ops = {
-	.name		= KBUILD_MODNAME,
-	.max_sta_intf	= 1,
-	.max_ap_intf	= 4,
-	.eeprom_size	= EEPROM_SIZE,
-	.rf_size	= RF_SIZE,
-	.tx_queues	= NUM_TX_QUEUES,
-	.rx		= &rt61pci_queue_rx,
-	.tx		= &rt61pci_queue_tx,
-	.bcn		= &rt61pci_queue_bcn,
-	.lib		= &rt61pci_rt2x00_ops,
-	.hw		= &rt61pci_mac80211_ops,
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 4,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= 0,
+	.rx			= &rt61pci_queue_rx,
+	.tx			= &rt61pci_queue_tx,
+	.bcn			= &rt61pci_queue_bcn,
+	.lib			= &rt61pci_rt2x00_ops,
+	.hw			= &rt61pci_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
-	.debugfs	= &rt61pci_rt2x00debug,
+	.debugfs		= &rt61pci_rt2x00debug,
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
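Both rt61pci here and rt73usb below stop writing extra_tx_headroom into the mac80211 hw struct directly; the value now lives in struct rt2x00_ops. Presumably rt2x00lib applies it once during device probe, along the lines of this one-line sketch:

	/* presumed rt2x00lib behaviour at probe time, shown only as a sketch */
	rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;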
 
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 93eb699165cc..6f33f7f5668c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 14e7bb210075..ced3b6ab5e16 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -1825,6 +1825,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
 	rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
 	rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
+	rt2x00_print_chip(rt2x00dev);
 
 	if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
 	    rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
@@ -2068,7 +2069,6 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 	    IEEE80211_HW_SIGNAL_DBM |
 	    IEEE80211_HW_SUPPORTS_PS |
 	    IEEE80211_HW_PS_NULLFUNC_STACK;
-	rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -2305,19 +2305,20 @@ static const struct data_queue_desc rt73usb_queue_bcn = {
 };
 
 static const struct rt2x00_ops rt73usb_ops = {
-	.name		= KBUILD_MODNAME,
-	.max_sta_intf	= 1,
-	.max_ap_intf	= 4,
-	.eeprom_size	= EEPROM_SIZE,
-	.rf_size	= RF_SIZE,
-	.tx_queues	= NUM_TX_QUEUES,
-	.rx		= &rt73usb_queue_rx,
-	.tx		= &rt73usb_queue_tx,
-	.bcn		= &rt73usb_queue_bcn,
-	.lib		= &rt73usb_rt2x00_ops,
-	.hw		= &rt73usb_mac80211_ops,
+	.name			= KBUILD_MODNAME,
+	.max_sta_intf		= 1,
+	.max_ap_intf		= 4,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.extra_tx_headroom	= TXD_DESC_SIZE,
+	.rx			= &rt73usb_queue_rx,
+	.tx			= &rt73usb_queue_tx,
+	.bcn			= &rt73usb_queue_bcn,
+	.lib			= &rt73usb_rt2x00_ops,
+	.hw			= &rt73usb_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
-	.debugfs	= &rt73usb_rt2x00debug,
+	.debugfs		= &rt73usb_rt2x00debug,
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
 
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 81fe0be51c42..e783a099a8f1 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 16429c49139c..a1a3dd15c664 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -548,7 +548,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
 	rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
 	rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
 
-	ret = request_irq(priv->pdev->irq, &rtl8180_interrupt,
+	ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
 			  IRQF_SHARED, KBUILD_MODNAME, dev);
 	if (ret) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index bf9175a8c1f4..abb4907cf296 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -119,7 +119,6 @@ struct rtl8187_priv {
 	} hw_rev;
 	struct sk_buff_head rx_queue;
 	u8 signal;
-	u8 quality;
 	u8 noise;
 	u8 slot_time;
 	u8 aifsn[4];
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 2017ccc00145..76973b8c7099 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -320,7 +320,6 @@ static void rtl8187_rx_cb(struct urb *urb)
 	struct ieee80211_rx_status rx_status = { 0 };
 	int rate, signal;
 	u32 flags;
-	u32 quality;
 	unsigned long f;
 
 	spin_lock_irqsave(&priv->rx_queue.lock, f);
@@ -338,10 +337,9 @@ static void rtl8187_rx_cb(struct urb *urb)
 			(typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
 		flags = le32_to_cpu(hdr->flags);
 		/* As with the RTL8187B below, the AGC is used to calculate
-		 * signal strength and quality. In this case, the scaling
+		 * signal strength. In this case, the scaling
 		 * constants are derived from the output of p54usb.
 		 */
-		quality = 130 - ((41 * hdr->agc) >> 6);
 		signal = -4 - ((27 * hdr->agc) >> 6);
 		rx_status.antenna = (hdr->signal >> 7) & 1;
 		rx_status.mactime = le64_to_cpu(hdr->mac_time);
@@ -354,23 +352,18 @@ static void rtl8187_rx_cb(struct urb *urb)
 		 * In testing, none of these quantities show qualitative
 		 * agreement with AP signal strength, except for the AGC,
 		 * which is inversely proportional to the strength of the
-		 * signal. In the following, the quality and signal strength
-		 * are derived from the AGC. The arbitrary scaling constants
+		 * signal. In the following, the signal strength
+		 * is derived from the AGC. The arbitrary scaling constants
 		 * are chosen to make the results close to the values obtained
 		 * for a BCM4312 using b43 as the driver. The noise is ignored
 		 * for now.
 		 */
 		flags = le32_to_cpu(hdr->flags);
-		quality = 170 - hdr->agc;
 		signal = 14 - hdr->agc / 2;
 		rx_status.antenna = (hdr->rssi >> 7) & 1;
 		rx_status.mactime = le64_to_cpu(hdr->mac_time);
 	}
 
-	if (quality > 100)
-		quality = 100;
-	rx_status.qual = quality;
-	priv->quality = quality;
 	rx_status.signal = signal;
 	priv->signal = signal;
 	rate = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 88060e117541..785e0244e305 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,6 +1,6 @@
 menuconfig WL12XX
 	tristate "TI wl12xx driver support"
-	depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
+	depends on MAC80211 && EXPERIMENTAL
 	---help---
 	  This will enable TI wl12xx driver support. The drivers make
 	  use of the mac80211 stack.
@@ -42,6 +42,7 @@ config WL1251_SDIO
 config WL1271
 	tristate "TI wl1271 support"
 	depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS
+	depends on INET
 	select FW_LOADER
 	select CRC7
 	---help---
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 998e4b6252bd..054533f7a124 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -269,6 +269,7 @@ struct wl1251 {
 
 	void (*set_power)(bool enable);
 	int irq;
+	bool use_eeprom;
 
 	enum wl1251_state state;
 	struct mutex mutex;
@@ -354,6 +355,8 @@ struct wl1251 {
 	/* is firmware in elp mode */
 	bool elp;
 
+	struct delayed_work elp_work;
+
 	/* we can be in psm, but not in elp, we have to differentiate */
 	bool psm;
 
@@ -374,6 +377,8 @@ struct wl1251 {
 	u8 buffer_busyword[WL1251_BUSY_WORD_LEN];
 	struct wl1251_rx_descriptor *rx_descriptor;
 
+	struct ieee80211_vif *vif;
+
 	u32 chip_id;
 	char fw_ver[21];
 };
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index 10b26c4532c9..acfa086dbfc5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -494,7 +494,7 @@ out:
 	return ret;
 }
 
-int wl1251_acx_beacon_filter_opt(struct wl1251 *wl)
+int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter)
 {
 	struct acx_beacon_filter_option *beacon_filter;
 	int ret;
@@ -507,7 +507,7 @@ int wl1251_acx_beacon_filter_opt(struct wl1251 *wl)
 		goto out;
 	}
 
-	beacon_filter->enable = 0;
+	beacon_filter->enable = enable_filter;
 	beacon_filter->max_num_beacons = 0;
 
 	ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -525,6 +525,7 @@ out:
 int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
 {
 	struct acx_beacon_filter_ie_table *ie_table;
+	int idx = 0;
 	int ret;
 
 	wl1251_debug(DEBUG_ACX, "acx beacon filter table");
@@ -535,8 +536,10 @@ int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
 		goto out;
 	}
 
-	ie_table->num_ie = 0;
-	memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE);
+	/* configure default beacon pass-through rules */
+	ie_table->num_ie = 1;
+	ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN;
+	ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE;
 
 	ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
 				   ie_table, sizeof(*ie_table));
@@ -550,6 +553,35 @@ out:
 	return ret;
 }
 
+int wl1251_acx_conn_monit_params(struct wl1251 *wl)
+{
+	struct acx_conn_monit_params *acx;
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx connection monitor parameters");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD;
+	acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT;
+
+	ret = wl1251_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1251_warning("failed to set connection monitor "
+			       "parameters: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
 int wl1251_acx_sg_enable(struct wl1251 *wl)
 {
 	struct acx_bt_wlan_coex *pta;
@@ -916,3 +948,31 @@ out:
 	kfree(mem_conf);
 	return ret;
 }
+
+int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim)
+{
+	struct wl1251_acx_wr_tbtt_and_dtim *acx;
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx tbtt and dtim");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->tbtt = tbtt;
+	acx->dtim = dtim;
+
+	ret = wl1251_cmd_configure(wl, ACX_WR_TBTT_AND_DTIM,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1251_warning("failed to set tbtt and dtim: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
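The beacon-filter IE table built above is a flat byte array of (element ID, rule) pairs, with num_ie counting the pairs rather than the bytes. Appending a hypothetical second rule would follow the same idx bookkeeping (the extra rule below is an illustration, not part of this patch):

	ie_table->table[idx++] = WLAN_EID_ERP_INFO;		/* element ID 42 */
	ie_table->table[idx++] = BEACON_RULE_PASS_ON_CHANGE;	/* pass when contents change */
	ie_table->num_ie = 2;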
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index cafb91459504..652371432cd8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -450,6 +450,11 @@ struct acx_beacon_filter_option {
 			   (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \
 			    BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE))
 
+#define BEACON_RULE_PASS_ON_CHANGE                     BIT(0)
+#define BEACON_RULE_PASS_ON_APPEARANCE                 BIT(1)
+
+#define BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN         (37)
+
 struct acx_beacon_filter_ie_table {
 	struct acx_header header;
 
@@ -458,6 +463,16 @@ struct acx_beacon_filter_ie_table {
 	u8 pad[3];
 } __attribute__ ((packed));
 
+#define SYNCH_FAIL_DEFAULT_THRESHOLD    10     /* number of beacons */
+#define NO_BEACON_DEFAULT_TIMEOUT       (500) /* in microseconds */
+
+struct acx_conn_monit_params {
+	struct acx_header header;
+
+	u32 synch_fail_thold; /* number of beacons missed */
+	u32 bss_lose_timeout; /* number of TUs from synch fail */
+};
+
 enum {
 	SG_ENABLE = 0,
 	SG_DISABLE,
@@ -1134,6 +1149,23 @@ struct wl1251_acx_mem_map {
 	u32 num_rx_mem_blocks;
 } __attribute__ ((packed));
 
+
+struct wl1251_acx_wr_tbtt_and_dtim {
+
+	struct acx_header header;
+
+	/* Time in TUs between two consecutive beacons */
+	u16 tbtt;
+
+	/*
+	 * DTIM period
+	 * For BSS: Number of TBTTs in a DTIM period (range: 1-10)
+	 * For IBSS: value shall be set to 1
+	 */
+	u8  dtim;
+	u8  padding;
+} __attribute__ ((packed));
+
 /*************************************************************************
 
     Host Interrupt Register (WiLink -> Host)
@@ -1273,8 +1305,9 @@ int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time);
 int wl1251_acx_group_address_tbl(struct wl1251 *wl);
 int wl1251_acx_service_period_timeout(struct wl1251 *wl);
 int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold);
-int wl1251_acx_beacon_filter_opt(struct wl1251 *wl);
+int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter);
 int wl1251_acx_beacon_filter_table(struct wl1251 *wl);
+int wl1251_acx_conn_monit_params(struct wl1251 *wl);
 int wl1251_acx_sg_enable(struct wl1251 *wl);
 int wl1251_acx_sg_cfg(struct wl1251 *wl);
 int wl1251_acx_cca_threshold(struct wl1251 *wl);
@@ -1288,5 +1321,6 @@ int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats);
 int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
 int wl1251_acx_rate_policies(struct wl1251 *wl);
 int wl1251_acx_mem_cfg(struct wl1251 *wl);
+int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
 
 #endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 452d748e42c6..2e733e7bdfd4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -296,8 +296,12 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
 		WL1251_ACX_INTR_INIT_COMPLETE;
 	wl1251_boot_target_enable_interrupts(wl);
 
-	/* unmask all mbox events  */
-	wl->event_mask = 0xffffffff;
+	wl->event_mask = SCAN_COMPLETE_EVENT_ID | BSS_LOSE_EVENT_ID |
+		SYNCHRONIZATION_TIMEOUT_EVENT_ID |
+		ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
+		ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
+		REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
+		BT_PTA_PREDICTION_EVENT_ID;
 
 	ret = wl1251_event_unmask(wl);
 	if (ret < 0) {
@@ -314,8 +318,8 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
 static int wl1251_boot_upload_firmware(struct wl1251 *wl)
 {
 	int addr, chunk_num, partition_limit;
-	size_t fw_data_len;
-	u8 *p;
+	size_t fw_data_len, len;
+	u8 *p, *buf;
 
 	/* whal_FwCtrl_LoadFwImageSm() */
 
@@ -334,6 +338,12 @@ static int wl1251_boot_upload_firmware(struct wl1251 *wl)
 		return -EIO;
 	}
 
+	buf = kmalloc(CHUNK_SIZE, GFP_KERNEL);
+	if (!buf) {
+		wl1251_error("allocation for firmware upload chunk failed");
+		return -ENOMEM;
+	}
+
 	wl1251_set_partition(wl, WL1251_PART_DOWN_MEM_START,
 			     WL1251_PART_DOWN_MEM_SIZE,
 			     WL1251_PART_DOWN_REG_START,
@@ -364,7 +374,11 @@ static int wl1251_boot_upload_firmware(struct wl1251 *wl)
 		p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
 		wl1251_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
 			     p, addr);
-		wl1251_mem_write(wl, addr, p, CHUNK_SIZE);
+
+		/* need to copy the chunk for dma */
+		len = CHUNK_SIZE;
+		memcpy(buf, p, len);
+		wl1251_mem_write(wl, addr, buf, len);
 
 		chunk_num++;
 	}
@@ -372,9 +386,16 @@ static int wl1251_boot_upload_firmware(struct wl1251 *wl)
 	/* 10.4 upload the last chunk */
 	addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE;
 	p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
+
+	/* need to copy the chunk for dma */
+	len = fw_data_len % CHUNK_SIZE;
+	memcpy(buf, p, len);
+
 	wl1251_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x",
-		     fw_data_len % CHUNK_SIZE, p, addr);
-	wl1251_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE);
+		     len, p, addr);
+	wl1251_mem_write(wl, addr, buf, len);
+
+	kfree(buf);
 
 	return 0;
 }
@@ -473,13 +494,19 @@ int wl1251_boot(struct wl1251 *wl)
 		goto out;
 
 	/* 2. start processing NVS file */
-	ret = wl1251_boot_upload_nvs(wl);
-	if (ret < 0)
-		goto out;
-
-	/* write firmware's last address (ie. it's length) to
-	 * ACX_EEPROMLESS_IND_REG */
-	wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len);
+	if (wl->use_eeprom) {
+		wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
+		msleep(4000);
+		wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
+	} else {
+		ret = wl1251_boot_upload_nvs(wl);
+		if (ret < 0)
+			goto out;
+
+		/* write firmware's last address (i.e. its length) to
+		 * ACX_EEPROMLESS_IND_REG */
+		wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len);
+	}
 
 	/* 6. read the EEPROM parameters */
 	tmp = wl1251_reg_read32(wl, SCR_PAD2);
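The bounce buffer introduced in wl1251_boot_upload_firmware() pairs with the switch from kmalloc() to vmalloc() for wl->fw in wl1251_main.c further down: vmalloc'd memory is not physically contiguous, so it presumably cannot be handed to the SPI layer for DMA directly, and each chunk is copied into a small kmalloc'd buffer first. Reduced to its essentials the pattern is (sketch only; src stands for the vmalloc-backed firmware image):

	u8 *buf = kmalloc(CHUNK_SIZE, GFP_KERNEL);	/* DMA-able scratch buffer */
	if (!buf)
		return -ENOMEM;
	memcpy(buf, src, CHUNK_SIZE);			/* copy out of the vmalloc area */
	wl1251_mem_write(wl, addr, buf, CHUNK_SIZE);
	kfree(buf);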
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.c b/drivers/net/wireless/wl12xx/wl1251_event.c
index 00076c4a8a21..020d764f9c13 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.c
+++ b/drivers/net/wireless/wl12xx/wl1251_event.c
@@ -79,6 +79,21 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
 		}
 	}
 
+	if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID && wl->psm) {
+		wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
+
+		/* indicate to the stack, that beacons have been lost */
+		ieee80211_beacon_loss(wl->vif);
+	}
+
+	if (vector & REGAINED_BSS_EVENT_ID) {
+		if (wl->psm_requested) {
+			ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
 	return 0;
 }
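The new SYNCHRONIZATION_TIMEOUT handling forwards beacon loss to mac80211 with ieee80211_beacon_loss(). As far as I can tell, mac80211 only expects this call from drivers that advertise beacon filtering, which the wl1251_main.c change later in this patch does by adding IEEE80211_HW_BEACON_FILTER to the hw flags. The pairing, reduced to a sketch:

	/* at init time: tell mac80211 the hardware filters beacons */
	wl->hw->flags |= IEEE80211_HW_BEACON_FILTER;

	/* from the event handler: let mac80211 probe the AP */
	ieee80211_beacon_loss(wl->vif);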
 
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index b2ee4f468fc4..5cb573383eeb 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -147,7 +147,8 @@ int wl1251_hw_init_beacon_filter(struct wl1251 *wl)
 {
 	int ret;
 
-	ret = wl1251_acx_beacon_filter_opt(wl);
+	/* disable beacon filtering at this stage */
+	ret = wl1251_acx_beacon_filter_opt(wl, false);
 	if (ret < 0)
 		return ret;
 
@@ -364,6 +365,11 @@ int wl1251_hw_init(struct wl1251 *wl)
 	if (ret < 0)
 		goto out_free_data_path;
 
+	/* Initialize connection monitoring thresholds */
+	ret = wl1251_acx_conn_monit_params(wl);
+	if (ret < 0)
+		goto out_free_data_path;
+
 	/* Beacon filtering */
 	ret = wl1251_hw_init_beacon_filter(wl);
 	if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1103256ad989..ff4be7bf5d36 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -28,6 +28,7 @@
 #include <linux/irq.h>
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
 
 #include "wl1251.h"
 #include "wl12xx_80211.h"
@@ -83,7 +84,7 @@ static int wl1251_fetch_firmware(struct wl1251 *wl)
 	}
 
 	wl->fw_len = fw->size;
-	wl->fw = kmalloc(wl->fw_len, GFP_KERNEL);
+	wl->fw = vmalloc(wl->fw_len);
 
 	if (!wl->fw) {
 		wl1251_error("could not allocate memory for the firmware");
@@ -183,8 +184,11 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
 		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
 			     wl->chip_id);
 		break;
-	case CHIP_ID_1251_PG10:
 	case CHIP_ID_1251_PG11:
+		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
+			     wl->chip_id);
+		break;
+	case CHIP_ID_1251_PG10:
 	default:
 		wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
 		ret = -ENODEV;
@@ -208,9 +212,10 @@ out:
 	return ret;
 }
 
+#define WL1251_IRQ_LOOP_COUNT 10
 static void wl1251_irq_work(struct work_struct *work)
 {
-	u32 intr;
+	u32 intr, ctr = WL1251_IRQ_LOOP_COUNT;
 	struct wl1251 *wl =
 		container_of(work, struct wl1251, irq_work);
 	int ret;
@@ -231,78 +236,86 @@ static void wl1251_irq_work(struct work_struct *work)
 	intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
 	wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr);
 
-	if (wl->data_path) {
-		wl->rx_counter =
-			wl1251_mem_read32(wl, wl->data_path->rx_control_addr);
-
-		/* We handle a frmware bug here */
-		switch ((wl->rx_counter - wl->rx_handled) & 0xf) {
-		case 0:
-			wl1251_debug(DEBUG_IRQ, "RX: FW and host in sync");
-			intr &= ~WL1251_ACX_INTR_RX0_DATA;
-			intr &= ~WL1251_ACX_INTR_RX1_DATA;
-			break;
-		case 1:
-			wl1251_debug(DEBUG_IRQ, "RX: FW +1");
-			intr |= WL1251_ACX_INTR_RX0_DATA;
-			intr &= ~WL1251_ACX_INTR_RX1_DATA;
-			break;
-		case 2:
-			wl1251_debug(DEBUG_IRQ, "RX: FW +2");
-			intr |= WL1251_ACX_INTR_RX0_DATA;
-			intr |= WL1251_ACX_INTR_RX1_DATA;
-			break;
-		default:
-			wl1251_warning("RX: FW and host out of sync: %d",
-				       wl->rx_counter - wl->rx_handled);
-			break;
-		}
-
-		wl->rx_handled = wl->rx_counter;
+	do {
+		if (wl->data_path) {
+			wl->rx_counter = wl1251_mem_read32(
+				wl, wl->data_path->rx_control_addr);
+
+			/* We handle a firmware bug here */
+			switch ((wl->rx_counter - wl->rx_handled) & 0xf) {
+			case 0:
+				wl1251_debug(DEBUG_IRQ,
+					     "RX: FW and host in sync");
+				intr &= ~WL1251_ACX_INTR_RX0_DATA;
+				intr &= ~WL1251_ACX_INTR_RX1_DATA;
+				break;
+			case 1:
+				wl1251_debug(DEBUG_IRQ, "RX: FW +1");
+				intr |= WL1251_ACX_INTR_RX0_DATA;
+				intr &= ~WL1251_ACX_INTR_RX1_DATA;
+				break;
+			case 2:
+				wl1251_debug(DEBUG_IRQ, "RX: FW +2");
+				intr |= WL1251_ACX_INTR_RX0_DATA;
+				intr |= WL1251_ACX_INTR_RX1_DATA;
+				break;
+			default:
+				wl1251_warning(
+					"RX: FW and host out of sync: %d",
+					wl->rx_counter - wl->rx_handled);
+				break;
+			}
 
+			wl->rx_handled = wl->rx_counter;
 
-		wl1251_debug(DEBUG_IRQ, "RX counter: %d", wl->rx_counter);
-	}
+			wl1251_debug(DEBUG_IRQ, "RX counter: %d",
+				     wl->rx_counter);
+		}
 
-	intr &= wl->intr_mask;
+		intr &= wl->intr_mask;
 
-	if (intr == 0) {
-		wl1251_debug(DEBUG_IRQ, "INTR is 0");
-		wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
-				   ~(wl->intr_mask));
+		if (intr == 0) {
+			wl1251_debug(DEBUG_IRQ, "INTR is 0");
+			goto out_sleep;
+		}
 
-		goto out_sleep;
-	}
+		if (intr & WL1251_ACX_INTR_RX0_DATA) {
+			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
+			wl1251_rx(wl);
+		}
 
-	if (intr & WL1251_ACX_INTR_RX0_DATA) {
-		wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
-		wl1251_rx(wl);
-	}
+		if (intr & WL1251_ACX_INTR_RX1_DATA) {
+			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA");
+			wl1251_rx(wl);
+		}
 
-	if (intr & WL1251_ACX_INTR_RX1_DATA) {
-		wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA");
-		wl1251_rx(wl);
-	}
+		if (intr & WL1251_ACX_INTR_TX_RESULT) {
+			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
+			wl1251_tx_complete(wl);
+		}
 
-	if (intr & WL1251_ACX_INTR_TX_RESULT) {
-		wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
-		wl1251_tx_complete(wl);
-	}
+		if (intr & (WL1251_ACX_INTR_EVENT_A |
+			    WL1251_ACX_INTR_EVENT_B)) {
+			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)",
+				     intr);
+			if (intr & WL1251_ACX_INTR_EVENT_A)
+				wl1251_event_handle(wl, 0);
+			else
+				wl1251_event_handle(wl, 1);
+		}
 
-	if (intr & (WL1251_ACX_INTR_EVENT_A | WL1251_ACX_INTR_EVENT_B)) {
-		wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", intr);
-		if (intr & WL1251_ACX_INTR_EVENT_A)
-			wl1251_event_handle(wl, 0);
-		else
-			wl1251_event_handle(wl, 1);
-	}
+		if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
+			wl1251_debug(DEBUG_IRQ,
+				     "WL1251_ACX_INTR_INIT_COMPLETE");
 
-	if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
-		wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_INIT_COMPLETE");
+		if (--ctr == 0)
+			break;
 
-	wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
+		intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
+	} while (intr);
 
 out_sleep:
+	wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
 	wl1251_ps_elp_sleep(wl);
 
 out:
@@ -506,6 +519,12 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
 		     conf->type, conf->mac_addr);
 
 	mutex_lock(&wl->mutex);
+	if (wl->vif) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	wl->vif = conf->vif;
 
 	switch (conf->type) {
 	case NL80211_IFTYPE_STATION:
@@ -535,7 +554,12 @@ out:
 static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
 					 struct ieee80211_if_init_conf *conf)
 {
+	struct wl1251 *wl = hw->priv;
+
+	mutex_lock(&wl->mutex);
 	wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
+	wl->vif = NULL;
+	mutex_unlock(&wl->mutex);
 }
 
 static int wl1251_build_null_data(struct wl1251 *wl)
@@ -552,7 +576,8 @@ static int wl1251_build_null_data(struct wl1251 *wl)
 
 	memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
 	template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
-						IEEE80211_STYPE_NULLFUNC);
+						IEEE80211_STYPE_NULLFUNC |
+						IEEE80211_FCTL_TODS);
 
 	return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
 				       sizeof(template));
@@ -565,7 +590,10 @@ static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
 
 	memcpy(template.bssid, wl->bssid, ETH_ALEN);
 	memcpy(template.ta, wl->mac_addr, ETH_ALEN);
-	template.aid = aid;
+
+	/* aid in PS-Poll has its two MSBs each set to 1 */
+	template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+
 	template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
 
 	return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
@@ -1087,8 +1115,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 			wl->beacon_int = bss_conf->beacon_int;
 			wl->dtim_period = bss_conf->dtim_period;
 
-			/* FIXME: call join */
-
+			ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
+							  wl->dtim_period);
 			wl->aid = bss_conf->aid;
 
 			ret = wl1251_build_ps_poll(wl, wl->aid);
@@ -1308,7 +1336,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
 	wl->hw->channel_change_time = 10000;
 
 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_NOISE_DBM;
+		IEEE80211_HW_NOISE_DBM |
+		IEEE80211_HW_SUPPORTS_PS |
+		IEEE80211_HW_BEACON_FILTER;
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 	wl->hw->wiphy->max_scan_ssids = 1;
@@ -1351,6 +1381,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
 	skb_queue_head_init(&wl->tx_queue);
 
 	INIT_WORK(&wl->filter_work, wl1251_filter_work);
+	INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
 	wl->channel = WL1251_DEFAULT_CHANNEL;
 	wl->scanning = false;
 	wl->default_key = 0;
@@ -1368,6 +1399,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
 	wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
 	wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
 	wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
+	wl->vif = NULL;
 
 	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
 		wl->tx_frames[i] = NULL;
@@ -1409,7 +1441,7 @@ int wl1251_free_hw(struct wl1251 *wl)
 
 	kfree(wl->target_mem_map);
 	kfree(wl->data_path);
-	kfree(wl->fw);
+	vfree(wl->fw);
 	wl->fw = NULL;
 	kfree(wl->nvs);
 	wl->nvs = NULL;
@@ -1426,4 +1458,5 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
 MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
-MODULE_ALIAS("spi:wl12xx");
+MODULE_ALIAS("spi:wl1251");
+MODULE_FIRMWARE(WL1251_FW_NAME);
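The PS-Poll fix above encodes the association ID the way 802.11 expects it in the frame's Duration/ID field, with the two most significant bits set. A quick worked example for AID 1:

	/* 1 << 15 | 1 << 14 | 1  ==  0xc001; cpu_to_le16() emits 01 c0 on the wire */
	template.aid = cpu_to_le16(1 << 15 | 1 << 14 | 1);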
diff --git a/drivers/net/wireless/wl12xx/wl1251_netlink.h b/drivers/net/wireless/wl12xx/wl1251_netlink.h
deleted file mode 100644
index ee36695e134e..000000000000
--- a/drivers/net/wireless/wl12xx/wl1251_netlink.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * This file is part of wl1251
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Contact: Kalle Valo <kalle.valo@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __WL1251_NETLINK_H__
-#define __WL1251_NETLINK_H__
-
-int wl1251_nl_register(void);
-void wl1251_nl_unregister(void);
-
-#endif /* __WL1251_NETLINK_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index c53e28727ed4..9931b197ff77 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -28,17 +28,41 @@
 
 #define WL1251_WAKEUP_TIMEOUT 2000
 
-/* Routines to toggle sleep mode while in ELP */
-void wl1251_ps_elp_sleep(struct wl1251 *wl)
+void wl1251_elp_work(struct work_struct *work)
 {
+	struct delayed_work *dwork;
+	struct wl1251 *wl;
+
+	dwork = container_of(work, struct delayed_work, work);
+	wl = container_of(dwork, struct wl1251, elp_work);
+
+	wl1251_debug(DEBUG_PSM, "elp work");
+
+	mutex_lock(&wl->mutex);
+
 	if (wl->elp || !wl->psm)
-		return;
+		goto out;
 
 	wl1251_debug(DEBUG_PSM, "chip to elp");
-
 	wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
-
 	wl->elp = true;
+
+out:
+	mutex_unlock(&wl->mutex);
+}
+
+#define ELP_ENTRY_DELAY  5
+
+/* Routines to toggle sleep mode while in ELP */
+void wl1251_ps_elp_sleep(struct wl1251 *wl)
+{
+	unsigned long delay;
+
+	if (wl->psm) {
+		cancel_delayed_work(&wl->elp_work);
+		delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
+		ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
+	}
 }
 
 int wl1251_ps_elp_wakeup(struct wl1251 *wl)
@@ -119,6 +143,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
 	case STATION_POWER_SAVE_MODE:
 		wl1251_debug(DEBUG_PSM, "entering psm");
 
+		/* enable beacon filtering */
+		ret = wl1251_acx_beacon_filter_opt(wl, true);
+		if (ret < 0)
+			return ret;
+
 		ret = wl1251_acx_wake_up_conditions(wl,
 						    WAKE_UP_EVENT_DTIM_BITMAP,
 						    wl->listen_int);
@@ -142,6 +171,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
 		if (ret < 0)
 			return ret;
 
+		/* disable beacon filtering */
+		ret = wl1251_acx_beacon_filter_opt(wl, false);
+		if (ret < 0)
+			return ret;
+
 		ret = wl1251_acx_wake_up_conditions(wl,
 						    WAKE_UP_EVENT_DTIM_BITMAP,
 						    wl->listen_int);
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.h b/drivers/net/wireless/wl12xx/wl1251_ps.h
index db036fe12f25..c688ac57aee4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.h
@@ -31,6 +31,7 @@
 int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode);
 void wl1251_ps_elp_sleep(struct wl1251 *wl);
 int wl1251_ps_elp_wakeup(struct wl1251 *wl);
+void wl1251_elp_work(struct work_struct *work);
 
 
 #endif /* __WL1251_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 06e1bd94a739..0ca3b4326056 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -370,6 +370,7 @@ enum wl12xx_acx_int_reg {
  EEPROM location specified in the EE_ADDR register.
  The Wlan hardware hardware clears this bit automatically.
 *===============================================*/
+#define EE_CTL                              (REGISTERS_BASE + 0x2000)
 #define ACX_EE_CTL_REG                      EE_CTL
 #define EE_WRITE                            0x00000001ul
 #define EE_READ                             0x00000002ul
@@ -380,6 +381,7 @@ enum wl12xx_acx_int_reg {
   This register specifies the address
   within the EEPROM from/to which to read/write data.
   ===============================================*/
+#define EE_ADDR                             (REGISTERS_BASE + 0x2008)
 #define ACX_EE_ADDR_REG                     EE_ADDR
 
 /*===============================================
@@ -389,8 +391,12 @@ enum wl12xx_acx_int_reg {
   data from the EEPROM or the write data
   to be written to the EEPROM.
   ===============================================*/
+#define EE_DATA                             (REGISTERS_BASE + 0x2004)
 #define ACX_EE_DATA_REG                     EE_DATA
 
+#define EEPROM_ACCESS_TO                    10000   /* timeout counter */
+#define START_EEPROM_MGR                    0x00000001
+
 /*===============================================
   EEPROM Base Address  - 32bit RW
   ------------------------------------------
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 17c54b59ef86..f84cc89cbffc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -72,10 +72,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
 	}
 
 	status->signal = desc->rssi;
-	status->qual = (desc->rssi - WL1251_RX_MIN_RSSI) * 100 /
-		(WL1251_RX_MAX_RSSI - WL1251_RX_MIN_RSSI);
-	status->qual = min(status->qual, 100);
-	status->qual = max(status->qual, 0);
 
 	/*
 	 * FIXME: guessing that snr needs to be divided by two, otherwise
@@ -153,7 +149,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
 		     beacon ? "beacon" : "");
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
-	ieee80211_rx(wl->hw, skb);
+	ieee80211_rx_ni(wl->hw, skb);
 }
 
 static void wl1251_rx_ack(struct wl1251 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 14eff2b3d4c6..9cc8c323830f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -270,6 +270,8 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
 		return -ENODEV;
 	}
 
+	wl->use_eeprom = pdata->use_eeprom;
+
 	ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
 	if (ret < 0) {
 		wl1251_error("request_irq() failed: %d", ret);
@@ -307,7 +309,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
 
 static struct spi_driver wl1251_spi_driver = {
 	.driver = {
-		.name		= "wl12xx",
+		.name		= "wl1251",
 		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 55818f94017b..94359b1a861f 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -32,6 +32,8 @@
 #include <linux/bitops.h>
 #include <net/mac80211.h>
 
+#include "wl1271_conf.h"
+
 #define DRIVER_NAME "wl1271"
 #define DRIVER_PREFIX DRIVER_NAME ": "
 
@@ -97,21 +99,42 @@ enum {
 	} while (0)
 
 #define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN |	\
-				  CFG_BSSID_FILTER_EN)
+				  CFG_BSSID_FILTER_EN | \
+				  CFG_MC_FILTER_EN)
 
 #define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
 				  CFG_RX_MGMT_EN | CFG_RX_DATA_EN |   \
 				  CFG_RX_CTL_EN | CFG_RX_BCN_EN |     \
 				  CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
 
+#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
+
 #define WL1271_FW_NAME "wl1271-fw.bin"
 #define WL1271_NVS_NAME "wl1271-nvs.bin"
 
-#define WL1271_BUSY_WORD_LEN 8
+/*
+ * Enable/disable 802.11a support for WL1273
+ */
+#undef WL1271_80211A_ENABLED
+
+/*
+ * FIXME: for the wl1271, a busy word count of 1 here would result in a more
+ * optimal SPI interface. However, there is an SPI bug that occasionally causes
+ * RXS timeouts on boot in that mode, so let's use three for now. A value of
+ * three should make sure that the chipset is always ready, though this
+ * will impact throughput and latencies slightly.
+ */
+#define WL1271_BUSY_WORD_CNT 3
+#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
 
 #define WL1271_ELP_HW_STATE_ASLEEP 0
 #define WL1271_ELP_HW_STATE_IRQ    1
 
+#define WL1271_DEFAULT_BEACON_INT  100
+#define WL1271_DEFAULT_DTIM_PERIOD 1
+
+#define ACX_TX_DESCRIPTORS         32
+
 enum wl1271_state {
 	WL1271_STATE_OFF,
 	WL1271_STATE_ON,
@@ -134,6 +157,8 @@ struct wl1271_partition {
 struct wl1271_partition_set {
 	struct wl1271_partition mem;
 	struct wl1271_partition reg;
+	struct wl1271_partition mem2;
+	struct wl1271_partition mem3;
 };
 
 struct wl1271;
@@ -258,15 +283,15 @@ struct wl1271_debugfs {
 
 /* FW status registers */
 struct wl1271_fw_status {
-	u32 intr;
+	__le32 intr;
 	u8  fw_rx_counter;
 	u8  drv_rx_counter;
 	u8  reserved;
 	u8  tx_results_counter;
-	u32 rx_pkt_descs[NUM_RX_PKT_DESC];
-	u32 tx_released_blks[NUM_TX_QUEUES];
-	u32 fw_localtime;
-	u32 padding[2];
+	__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
+	__le32 tx_released_blks[NUM_TX_QUEUES];
+	__le32 fw_localtime;
+	__le32 padding[2];
 } __attribute__ ((packed));
 
 struct wl1271_rx_mem_pool_addr {
@@ -274,6 +299,15 @@ struct wl1271_rx_mem_pool_addr {
 	u32 addr_extra;
 };
 
+struct wl1271_scan {
+	u8 state;
+	u8 ssid[IW_ESSID_MAX_SIZE+1];
+	size_t ssid_len;
+	u8 active;
+	u8 high_prio;
+	u8 probe_requests;
+};
+
 struct wl1271 {
 	struct ieee80211_hw *hw;
 	bool mac80211_registered;
@@ -288,10 +322,7 @@ struct wl1271 {
 	enum wl1271_state state;
 	struct mutex mutex;
 
-	int physical_mem_addr;
-	int physical_reg_addr;
-	int virtual_mem_addr;
-	int virtual_reg_addr;
+	struct wl1271_partition_set part;
 
 	struct wl1271_chip chip;
 
@@ -308,7 +339,6 @@ struct wl1271 {
 	u8 bss_type;
 	u8 ssid[IW_ESSID_MAX_SIZE + 1];
 	u8 ssid_len;
-	u8 listen_int;
 	int channel;
 
 	struct wl1271_acx_mem_map *target_mem_map;
@@ -332,10 +362,14 @@ struct wl1271 {
 	bool tx_queue_stopped;
 
 	struct work_struct tx_work;
-	struct work_struct filter_work;
 
 	/* Pending TX frames */
-	struct sk_buff *tx_frames[16];
+	struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
+
+	/* Security sequence number counters */
+	u8 tx_security_last_seq;
+	u16 tx_security_seq_16;
+	u32 tx_security_seq_32;
 
 	/* FW Rx counter */
 	u32 rx_counter;
@@ -354,10 +388,17 @@ struct wl1271 {
 
 	/* Are we currently scanning */
 	bool scanning;
+	struct wl1271_scan scan;
 
 	/* Our association ID */
 	u16 aid;
 
+	/* currently configured rate set */
+	u32 basic_rate_set;
+
+	/* The current band */
+	enum ieee80211_band band;
+
 	/* Default key (for WEP) */
 	u32 default_key;
 
@@ -368,6 +409,7 @@ struct wl1271 {
 	bool elp;
 
 	struct completion *elp_compl;
+	struct delayed_work elp_work;
 
 	/* we can be in psm, but not in elp, we have to differentiate */
 	bool psm;
@@ -375,6 +417,9 @@ struct wl1271 {
 	/* PSM mode requested */
 	bool psm_requested;
 
+	/* retry counter for PSM entries */
+	u8 psm_entry_retry;
+
 	/* in dBm */
 	int power_level;
 
@@ -383,11 +428,20 @@ struct wl1271 {
 
 	u32 buffer_32;
 	u32 buffer_cmd;
-	u8 buffer_busyword[WL1271_BUSY_WORD_LEN];
-	struct wl1271_rx_descriptor *rx_descriptor;
+	u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 
 	struct wl1271_fw_status *fw_status;
 	struct wl1271_tx_hw_res_if *tx_res_if;
+
+	struct ieee80211_vif *vif;
+
+	/* Used for a workaround to send disconnect before rejoining */
+	bool joined;
+
+	/* Current chipset configuration */
+	struct conf_drv_settings conf;
+
+	struct list_head list;
 };
 
 int wl1271_plt_start(struct wl1271 *wl);
@@ -404,4 +458,13 @@ int wl1271_plt_stop(struct wl1271 *wl);
 /* WL1271 needs a 200ms sleep after power on */
 #define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
 
+static inline bool wl1271_11a_enabled(void)
+{
+#ifdef WL1271_80211A_ENABLED
+	return true;
+#else
+	return false;
+#endif
+}
+
 #endif
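With struct wl1271_fw_status switched to __le32 fields, consumers presumably byte-swap explicitly on every read, and sparse can then catch missing conversions. A minimal sketch of the access pattern (field names taken from the struct above):

	u32 intr    = le32_to_cpu(wl->fw_status->intr);
	u32 fw_time = le32_to_cpu(wl->fw_status->fw_localtime);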
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index f622a4092615..5cc89bbdac7a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -34,8 +34,7 @@
 #include "wl1271_spi.h"
 #include "wl1271_ps.h"
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
-				  u8 listen_interval)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
 {
 	struct acx_wake_up_condition *wake_up;
 	int ret;
@@ -48,8 +47,8 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
 		goto out;
 	}
 
-	wake_up->wake_up_event = wake_up_event;
-	wake_up->listen_interval = listen_interval;
+	wake_up->wake_up_event = wl->conf.conn.wake_up_event;
+	wake_up->listen_interval = wl->conf.conn.listen_interval;
 
 	ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
 				   wake_up, sizeof(*wake_up));
@@ -137,7 +136,12 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
 		goto out;
 	}
 
-	acx->current_tx_power = power * 10;
+	/*
+	 * FIXME: This is a workaround needed while we don't have the correct
+	 * calibration, to avoid distortions.
+	 */
+	/* acx->current_tx_power = power * 10; */
+	acx->current_tx_power = 120;
 
 	ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
 	if (ret < 0) {
@@ -193,7 +197,7 @@ int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
 	return 0;
 }
 
-int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time)
+int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl)
 {
 	struct acx_rx_msdu_lifetime *acx;
 	int ret;
@@ -206,7 +210,7 @@ int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time)
 		goto out;
 	}
 
-	acx->lifetime = life_time;
+	acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time);
 	ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
 				   acx, sizeof(*acx));
 	if (ret < 0) {
@@ -232,8 +236,8 @@ int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
 		goto out;
 	}
 
-	rx_config->config_options = config;
-	rx_config->filter_options = filter;
+	rx_config->config_options = cpu_to_le32(config);
+	rx_config->filter_options = cpu_to_le32(filter);
 
 	ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
 				   rx_config, sizeof(*rx_config));
@@ -260,7 +264,7 @@ int wl1271_acx_pd_threshold(struct wl1271 *wl)
 		goto out;
 	}
 
-	/* FIXME: threshold value not set */
+	pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
 
 	ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
 	if (ret < 0) {
@@ -300,7 +304,8 @@ out:
 	return ret;
 }
 
-int wl1271_acx_group_address_tbl(struct wl1271 *wl)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
+				 void *mc_list, u32 mc_list_len)
 {
 	struct acx_dot11_grp_addr_tbl *acx;
 	int ret;
@@ -314,9 +319,9 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl)
 	}
 
 	/* MAC filtering */
-	acx->enabled = 0;
-	acx->num_groups = 0;
-	memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+	acx->enabled = enable;
+	acx->num_groups = mc_list_len;
+	memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
 
 	ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
 				   acx, sizeof(*acx));
@@ -343,8 +348,8 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
 
 	wl1271_debug(DEBUG_ACX, "acx service period timeout");
 
-	rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF;
-	rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF;
+	rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
+	rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
 
 	ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
 				   rx_timeout, sizeof(*rx_timeout));
@@ -372,7 +377,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold)
 		goto out;
 	}
 
-	rts->threshold = rts_threshold;
+	rts->threshold = cpu_to_le16(rts_threshold);
 
 	ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
 	if (ret < 0) {
@@ -385,20 +390,29 @@ out:
 	return ret;
 }
 
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
 {
-	struct acx_beacon_filter_option *beacon_filter;
-	int ret;
+	struct acx_beacon_filter_option *beacon_filter = NULL;
+	int ret = 0;
 
 	wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
 
+	if (enable_filter &&
+	    wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
+		goto out;
+
 	beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
 	if (!beacon_filter) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	beacon_filter->enable = 0;
+	beacon_filter->enable = enable_filter;
+
+	/*
+	 * When set to zero, and the filter is enabled, beacons
+	 * without the unicast TIM bit set are dropped.
+	 */
 	beacon_filter->max_num_beacons = 0;
 
 	ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -416,7 +430,9 @@ out:
 int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
 {
 	struct acx_beacon_filter_ie_table *ie_table;
+	int i, idx = 0;
 	int ret;
+	bool vendor_spec = false;
 
 	wl1271_debug(DEBUG_ACX, "acx beacon filter table");
 
@@ -426,8 +442,32 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
 		goto out;
 	}
 
+	/* configure default beacon pass-through rules */
 	ie_table->num_ie = 0;
-	memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE);
+	for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
+		struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
+		ie_table->table[idx++] = r->ie;
+		ie_table->table[idx++] = r->rule;
+
+		if (r->ie == WLAN_EID_VENDOR_SPECIFIC) {
+			/* only one vendor specific ie allowed */
+			if (vendor_spec)
+				continue;
+
+			/* for vendor specific rules configure the
+			   additional fields */
+			memcpy(&(ie_table->table[idx]), r->oui,
+			       CONF_BCN_IE_OUI_LEN);
+			idx += CONF_BCN_IE_OUI_LEN;
+			ie_table->table[idx++] = r->type;
+			memcpy(&(ie_table->table[idx]), r->version,
+			       CONF_BCN_IE_VER_LEN);
+			idx += CONF_BCN_IE_VER_LEN;
+			vendor_spec = true;
+		}
+
+		ie_table->num_ie++;
+	}
 
 	ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
 				   ie_table, sizeof(*ie_table));
@@ -441,6 +481,36 @@ out:
 	return ret;
 }
 
+int wl1271_acx_conn_monit_params(struct wl1271 *wl)
+{
+	struct acx_conn_monit_params *acx;
+	int ret;
+
+	wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
+	acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
+
+	ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("failed to set connection monitor "
+			       "parameters: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+
 int wl1271_acx_sg_enable(struct wl1271 *wl)
 {
 	struct acx_bt_wlan_coex *pta;
@@ -470,6 +540,7 @@ out:
 int wl1271_acx_sg_cfg(struct wl1271 *wl)
 {
 	struct acx_bt_wlan_coex_param *param;
+	struct conf_sg_settings *c = &wl->conf.sg;
 	int ret;
 
 	wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -481,34 +552,19 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
 	}
 
 	/* BT-WLAN coext parameters */
-	param->min_rate = RATE_INDEX_24MBPS;
-	param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF;
-	param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF;
-	param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF;
-	param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF;
-	param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF;
-	param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF;
-	param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF;
-	param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF;
-	param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF;
-	param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF;
-	param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF;
-	param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF;
-	param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
-	param->antenna_type = PTA_ANTENNA_TYPE_DEF;
-	param->signal_type = PTA_SIGNALING_TYPE_DEF;
-	param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
-	param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
-	param->max_cts = PTA_MAX_NUM_CTS_DEF;
-	param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
-	param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
-	param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
-	param->wlan_elp_hp = PTA_ELP_HP_DEF;
-	param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
-	param->ack_mode_dual_ant = PTA_ACK_MODE_DEF;
-	param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
-	param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
-	param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
+	param->per_threshold = cpu_to_le32(c->per_threshold);
+	param->max_scan_compensation_time =
+		cpu_to_le32(c->max_scan_compensation_time);
+	param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
+	param->load_ratio = c->load_ratio;
+	param->auto_ps_mode = c->auto_ps_mode;
+	param->probe_req_compensation = c->probe_req_compensation;
+	param->scan_window_compensation = c->scan_window_compensation;
+	param->antenna_config = c->antenna_config;
+	param->beacon_miss_threshold = c->beacon_miss_threshold;
+	param->rate_adaptation_threshold =
+		cpu_to_le32(c->rate_adaptation_threshold);
+	param->rate_adaptation_snr = c->rate_adaptation_snr;
 
 	ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
 	if (ret < 0) {
@@ -534,8 +590,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
 		goto out;
 	}
 
-	detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
-	detection->tx_energy_detection = 0;
+	detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold);
+	detection->tx_energy_detection = wl->conf.tx.tx_energy_detection;
 
 	ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
 				   detection, sizeof(*detection));
@@ -562,10 +618,10 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
 		goto out;
 	}
 
-	bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
-	bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
-	bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE;
-	bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF;
+	bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
+	bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
+	bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
+	bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold;
 
 	ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
 	if (ret < 0) {
@@ -591,7 +647,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
 		goto out;
 	}
 
-	acx_aid->aid = aid;
+	acx_aid->aid = cpu_to_le16(aid);
 
 	ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
 	if (ret < 0) {
@@ -618,9 +674,8 @@ int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask)
 	}
 
 	/* high event mask is unused */
-	mask->high_event_mask = 0xffffffff;
-
-	mask->event_mask = event_mask;
+	mask->high_event_mask = cpu_to_le32(0xffffffff);
+	mask->event_mask = cpu_to_le32(event_mask);
 
 	ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
 				   mask, sizeof(*mask));
@@ -703,9 +758,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
 	return 0;
 }
 
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
 {
 	struct acx_rate_policy *acx;
+	struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
 	int ret = 0;
 
 	wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -718,11 +774,11 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
 	}
 
 	/* configure one default (one-size-fits-all) rate class */
-	acx->rate_class_cnt = 1;
-	acx->rate_class[0].enabled_rates = ACX_RATE_MASK_ALL;
-	acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
-	acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
-	acx->rate_class[0].aflags = 0;
+	acx->rate_class_cnt = cpu_to_le32(1);
+	acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
+	acx->rate_class[0].short_retry_limit = c->short_retry_limit;
+	acx->rate_class[0].long_retry_limit = c->long_retry_limit;
+	acx->rate_class[0].aflags = c->aflags;
 
 	ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
 	if (ret < 0) {
@@ -749,22 +805,14 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
 		goto out;
 	}
 
-	/*
-	 * FIXME: Configure each AC with appropriate values (most suitable
-	 * values will probably be different for each AC.
-	 */
-	for (i = 0; i < WL1271_ACX_AC_COUNT; i++) {
-		acx->ac = i;
-
-		/*
-		 * FIXME: The following default values originate from
-		 * the TI reference driver. What do they mean?
-		 */
-		acx->cw_min = 15;
-		acx->cw_max = 63;
-		acx->aifsn = 3;
+	for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+		struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
+		acx->ac = c->ac;
+		acx->cw_min = c->cw_min;
+		acx->cw_max = cpu_to_le16(c->cw_max);
+		acx->aifsn = c->aifsn;
 		acx->reserved = 0;
-		acx->tx_op_limit = 0;
+		acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
 
 		ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
 		if (ret < 0) {
@@ -793,12 +841,15 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
 		goto out;
 	}
 
-	/* FIXME: configure each TID with a different AC reference */
-	for (i = 0; i < WL1271_ACX_TID_COUNT; i++) {
-		acx->queue_id = i;
-		acx->tsid = WL1271_ACX_AC_BE;
-		acx->ps_scheme = WL1271_ACX_PS_SCHEME_LEGACY;
-		acx->ack_policy = WL1271_ACX_ACK_POLICY_LEGACY;
+	for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+		struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
+		acx->queue_id = c->queue_id;
+		acx->channel_type = c->channel_type;
+		acx->tsid = c->tsid;
+		acx->ps_scheme = c->ps_scheme;
+		acx->ack_policy = c->ack_policy;
+		acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
+		acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
 
 		ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
 		if (ret < 0) {
@@ -826,7 +877,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
 		goto out;
 	}
 
-	acx->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
+	acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold);
 	ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
 	if (ret < 0) {
 		wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -852,8 +903,8 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl)
 		goto out;
 	}
 
-	acx->tx_compl_timeout = WL1271_ACX_TX_COMPL_TIMEOUT;
-	acx->tx_compl_threshold = WL1271_ACX_TX_COMPL_THRESHOLD;
+	acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout);
+	acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold);
 	ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx));
 	if (ret < 0) {
 		wl1271_warning("Setting of tx options failed: %d", ret);
@@ -879,11 +930,11 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
 	}
 
 	/* memory config */
-	mem_conf->num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
+	mem_conf->num_stations = DEFAULT_NUM_STATIONS;
 	mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
 	mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
 	mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
-	mem_conf->total_tx_descriptors = ACX_TX_DESCRIPTORS;
+	mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
 
 	ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
 				   sizeof(*mem_conf));
@@ -906,7 +957,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
 		return ret;
 
 	wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
-					  GFP_KERNEL);
+				     GFP_KERNEL);
 	if (!wl->target_mem_map) {
 		wl1271_error("couldn't allocate target memory map");
 		return -ENOMEM;
@@ -923,7 +974,8 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
 	}
 
 	/* initialize TX block book keeping */
-	wl->tx_blocks_available = wl->target_mem_map->num_tx_mem_blocks;
+	wl->tx_blocks_available =
+		le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks);
 	wl1271_debug(DEBUG_TX, "available tx blocks: %d",
 		     wl->tx_blocks_available);
 
@@ -943,10 +995,10 @@ int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
 		goto out;
 	}
 
-	rx_conf->threshold = WL1271_RX_INTR_THRESHOLD_DEF;
-	rx_conf->timeout = WL1271_RX_INTR_TIMEOUT_DEF;
-	rx_conf->mblk_threshold = USHORT_MAX; /* Disabled */
-	rx_conf->queue_type = RX_QUEUE_TYPE_RX_LOW_PRIORITY;
+	rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold);
+	rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout);
+	rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold);
+	rx_conf->queue_type = wl->conf.rx.queue_type;
 
 	ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf,
 				   sizeof(*rx_conf));
@@ -959,3 +1011,124 @@ out:
 	kfree(rx_conf);
 	return ret;
 }
+
+int wl1271_acx_smart_reflex(struct wl1271 *wl)
+{
+	struct acx_smart_reflex_state *sr_state = NULL;
+	struct acx_smart_reflex_config_params *sr_param = NULL;
+	int i, ret;
+
+	wl1271_debug(DEBUG_ACX, "acx smart reflex");
+
+	sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
+	if (!sr_param) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
+		struct conf_mart_reflex_err_table *e =
+			&(wl->conf.init.sr_err_tbl[i]);
+
+		sr_param->error_table[i].len = e->len;
+		sr_param->error_table[i].upper_limit = e->upper_limit;
+		memcpy(sr_param->error_table[i].values, e->values, e->len);
+	}
+
+	ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
+				   sr_param, sizeof(*sr_param));
+	if (ret < 0) {
+		wl1271_warning("failed to set smart reflex params: %d", ret);
+		goto out;
+	}
+
+	sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
+	if (!sr_state) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* enable smart reflex */
+	sr_state->enable = wl->conf.init.sr_enable;
+
+	ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
+				   sr_state, sizeof(*sr_state));
+	if (ret < 0) {
+		wl1271_warning("failed to set smart reflex params: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(sr_state);
+	kfree(sr_param);
+	return ret;
+
+}
+
+int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+{
+	struct wl1271_acx_bet_enable *acx = NULL;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_ACX, "acx bet enable");
+
+	if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE)
+		goto out;
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
+	acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
+
+	ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("acx bet enable failed: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
+			     u8 version)
+{
+	struct wl1271_acx_arp_filter *acx;
+	int ret;
+
+	wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->version = version;
+	acx->enable = enable;
+
+	if (enable) {
+		if (version == ACX_IPV4_VERSION)
+			memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE);
+		else if (version == ACX_IPV6_VERSION)
+			memcpy(acx->address, address, sizeof(acx->address));
+		else
+			wl1271_error("Invalid IP version");
+	}
+
+	ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("failed to set arp ip filter: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
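As a usage sketch (not taken from this patch), a caller holding the
interface's IPv4 address in network byte order could enable the new
firmware ARP filter as follows; the wrapper name is hypothetical, only
wl1271_acx_arp_ip_filter() and its constants come from the code above.

/* Hypothetical wrapper: enable ARP filtering for one IPv4 address
 * given in network byte order. */
static int wl1271_enable_arp_filter(struct wl1271 *wl, __be32 addr)
{
	return wl1271_acx_arp_ip_filter(wl, true, (u8 *)&addr,
					ACX_IPV4_VERSION);
}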
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 9068daaf0ddf..2ce0a8128542 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -61,8 +61,9 @@
 					    WL1271_ACX_INTR_HW_AVAILABLE  | \
 					    WL1271_ACX_INTR_DATA)
 
-#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_EVENT_A |	\
-					    WL1271_ACX_INTR_EVENT_B | \
+#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_EVENT_A      | \
+					    WL1271_ACX_INTR_EVENT_B      | \
+					    WL1271_ACX_INTR_HW_AVAILABLE | \
 					    WL1271_ACX_INTR_DATA)
 
 /* Target's information element */
@@ -70,11 +71,11 @@ struct acx_header {
 	struct wl1271_cmd_header cmd;
 
 	/* acx (or information element) header */
-	u16 id;
+	__le16 id;
 
 	/* payload length (not including headers) */
-	u16 len;
-};
+	__le16 len;
+} __attribute__ ((packed));
 
 struct acx_error_counter {
 	struct acx_header header;
@@ -82,21 +83,21 @@ struct acx_error_counter {
 	/* The number of PLCP errors since the last time this */
 	/* information element was interrogated. This field is */
 	/* automatically cleared when it is interrogated.*/
-	u32 PLCP_error;
+	__le32 PLCP_error;
 
 	/* The number of FCS errors since the last time this */
 	/* information element was interrogated. This field is */
 	/* automatically cleared when it is interrogated.*/
-	u32 FCS_error;
+	__le32 FCS_error;
 
 	/* The number of MPDUs without PLCP header errors received*/
 	/* since the last time this information element was interrogated. */
 	/* This field is automatically cleared when it is interrogated.*/
-	u32 valid_frame;
+	__le32 valid_frame;
 
 	/* the number of missed sequence numbers in the sequentially */
 	/* ordered frame sequence numbers */
-	u32 seq_num_miss;
+	__le32 seq_num_miss;
 } __attribute__ ((packed));
 
 struct acx_revision {
@@ -125,7 +126,7 @@ struct acx_revision {
 	 *              (1 = first spin, 2 = second spin, and so on).
 	 * bits 24 - 31: Chip ID - The WiLink chip ID.
 	 */
-	u32 hw_version;
+	__le32 hw_version;
 } __attribute__ ((packed));
 
 enum wl1271_psm_mode {
@@ -170,7 +171,6 @@ enum {
 #define  DP_RX_PACKET_RING_CHUNK_NUM 2
 #define  DP_TX_PACKET_RING_CHUNK_NUM 2
 #define  DP_TX_COMPLETE_TIME_OUT 20
-#define  FW_TX_CMPLT_BLOCK_SIZE 16
 
 #define TX_MSDU_LIFETIME_MIN       0
 #define TX_MSDU_LIFETIME_MAX       3000
@@ -186,7 +186,7 @@ struct acx_rx_msdu_lifetime {
 	 * The maximum amount of time, in TU, before the
 	 * firmware discards the MSDU.
 	 */
-	u32 lifetime;
+	__le32 lifetime;
 } __attribute__ ((packed));
 
 /*
@@ -273,14 +273,14 @@ struct acx_rx_msdu_lifetime {
 struct acx_rx_config {
 	struct acx_header header;
 
-	u32 config_options;
-	u32 filter_options;
+	__le32 config_options;
+	__le32 filter_options;
 } __attribute__ ((packed));
 
 struct acx_packet_detection {
 	struct acx_header header;
 
-	u32 threshold;
+	__le32 threshold;
 } __attribute__ ((packed));
 
 
@@ -302,8 +302,8 @@ struct acx_slot {
 } __attribute__ ((packed));
 
 
-#define ADDRESS_GROUP_MAX	(8)
-#define ADDRESS_GROUP_MAX_LEN	(ETH_ALEN * ADDRESS_GROUP_MAX)
+#define ACX_MC_ADDRESS_GROUP_MAX	(8)
+#define ADDRESS_GROUP_MAX_LEN	        (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
 
 struct acx_dot11_grp_addr_tbl {
 	struct acx_header header;
@@ -314,40 +314,17 @@ struct acx_dot11_grp_addr_tbl {
 	u8 mac_table[ADDRESS_GROUP_MAX_LEN];
 } __attribute__ ((packed));
 
-
-#define  RX_TIMEOUT_PS_POLL_MIN    0
-#define  RX_TIMEOUT_PS_POLL_MAX    (200000)
-#define  RX_TIMEOUT_PS_POLL_DEF    (15)
-#define  RX_TIMEOUT_UPSD_MIN       0
-#define  RX_TIMEOUT_UPSD_MAX       (200000)
-#define  RX_TIMEOUT_UPSD_DEF       (15)
-
 struct acx_rx_timeout {
 	struct acx_header header;
 
-	/*
-	 * The longest time the STA will wait to receive
-	 * traffic from the AP after a PS-poll has been
-	 * transmitted.
-	 */
-	u16 ps_poll_timeout;
-
-	/*
-	 * The longest time the STA will wait to receive
-	 * traffic from the AP after a frame has been sent
-	 * from an UPSD enabled queue.
-	 */
-	u16 upsd_timeout;
+	__le16 ps_poll_timeout;
+	__le16 upsd_timeout;
 } __attribute__ ((packed));
 
-#define RTS_THRESHOLD_MIN              0
-#define RTS_THRESHOLD_MAX              4096
-#define RTS_THRESHOLD_DEF              2347
-
 struct acx_rts_threshold {
 	struct acx_header header;
 
-	u16 threshold;
+	__le16 threshold;
 	u8 pad[2];
 } __attribute__ ((packed));
 
@@ -408,6 +385,13 @@ struct acx_beacon_filter_ie_table {
 	u8 pad[3];
 } __attribute__ ((packed));
 
+struct acx_conn_monit_params {
+	struct acx_header header;
+
+	__le32 synch_fail_thold; /* number of beacons missed */
+	__le32 bss_lose_timeout; /* number of TU's from synch fail */
+} __attribute__ ((packed));
+
 enum {
 	SG_ENABLE = 0,
 	SG_DISABLE,
@@ -431,6 +415,25 @@ struct acx_bt_wlan_coex {
 	u8 pad[3];
 } __attribute__ ((packed));
 
+struct acx_smart_reflex_state {
+	struct acx_header header;
+
+	u8 enable;
+	u8 padding[3];
+} __attribute__ ((packed));
+
+struct smart_reflex_err_table {
+	u8 len;
+	s8 upper_limit;
+	s8 values[14];
+} __attribute__ ((packed));
+
+struct acx_smart_reflex_config_params {
+	struct acx_header header;
+
+	struct smart_reflex_err_table error_table[3];
+} __attribute__ ((packed));
+
 #define PTA_ANTENNA_TYPE_DEF		  (0)
 #define PTA_BT_HP_MAXTIME_DEF		  (2000)
 #define PTA_WLAN_HP_MAX_TIME_DEF	  (5000)
@@ -463,150 +466,34 @@ struct acx_bt_wlan_coex {
 struct acx_bt_wlan_coex_param {
 	struct acx_header header;
 
-	/*
-	 * The minimum rate of a received WLAN packet in the STA,
-	 * during protective mode, of which a new BT-HP request
-	 * during this Rx will always be respected and gain the antenna.
-	 */
-	u32 min_rate;
-
-	/* Max time the BT HP will be respected. */
-	u16 bt_hp_max_time;
-
-	/* Max time the WLAN HP will be respected. */
-	u16 wlan_hp_max_time;
-
-	/*
-	 * The time between the last BT activity
-	 * and the moment when the sense mode returns
-	 * to SENSE_INACTIVE.
-	 */
-	u16 sense_disable_timer;
-
-	/* Time before the next BT HP instance */
-	u16 rx_time_bt_hp;
-	u16 tx_time_bt_hp;
-
-	/* range: 10-20000    default: 1500 */
-	u16 rx_time_bt_hp_fast;
-	u16 tx_time_bt_hp_fast;
-
-	/* range: 2000-65535  default: 8700 */
-	u16 wlan_cycle_fast;
-
-	/* range: 0 - 15000 (Msec) default: 1000 */
-	u16 bt_anti_starvation_period;
-
-	/* range 400-10000(Usec) default: 3000 */
-	u16 next_bt_lp_packet;
-
-	/* Deafult: worst case for BT DH5 traffic */
-	u16 wake_up_beacon;
-
-	/* range: 0-50000(Usec) default: 1050 */
-	u16 hp_dm_max_guard_time;
-
-	/*
-	 * This is to prevent both BT & WLAN antenna
-	 * starvation.
-	 * Range: 100-50000(Usec) default:2550
-	 */
-	u16 next_wlan_packet;
-
-	/* 0 -> shared antenna */
-	u8 antenna_type;
-
-	/*
-	 * 0 -> TI legacy
-	 * 1 -> Palau
-	 */
-	u8 signal_type;
-
-	/*
-	 * BT AFH status
-	 * 0 -> no AFH
-	 * 1 -> from dedicated GPIO
-	 * 2 -> AFH on (from host)
-	 */
-	u8 afh_leverage_on;
-
-	/*
-	 * The number of cycles during which no
-	 * TX will be sent after 1 cycle of RX
-	 * transaction in protective mode
-	 */
-	u8 quiet_cycle_num;
-
-	/*
-	 * The maximum number of CTSs that will
-	 * be sent for receiving RX packet in
-	 * protective mode
-	 */
-	u8 max_cts;
-
-	/*
-	 * The number of WLAN packets
-	 * transferred in common mode before
-	 * switching to BT.
-	 */
-	u8 wlan_packets_num;
-
-	/*
-	 * The number of BT packets
-	 * transferred in common mode before
-	 * switching to WLAN.
-	 */
-	u8 bt_packets_num;
-
-	/* range: 1-255  default: 5 */
-	u8 missed_rx_avalanche;
-
-	/* range: 0-1    default: 1 */
-	u8 wlan_elp_hp;
-
-	/* range: 0 - 15  default: 4 */
-	u8 bt_anti_starvation_cycles;
-
-	u8 ack_mode_dual_ant;
-
-	/*
-	 * Allow PA_SD assertion/de-assertion
-	 * during enabled BT activity.
-	 */
-	u8 pa_sd_enable;
-
-	/*
-	 * Enable/Disable PTA in auto mode:
-	 * Support Both Active & P.S modes
-	 */
-	u8 pta_auto_mode_enable;
-
-	/* range: 0 - 20  default: 1 */
-	u8 bt_hp_respected_num;
+	__le32 per_threshold;
+	__le32 max_scan_compensation_time;
+	__le16 nfs_sample_interval;
+	u8 load_ratio;
+	u8 auto_ps_mode;
+	u8 probe_req_compensation;
+	u8 scan_window_compensation;
+	u8 antenna_config;
+	u8 beacon_miss_threshold;
+	__le32 rate_adaptation_threshold;
+	s8 rate_adaptation_snr;
+	u8 padding[3];
 } __attribute__ ((packed));
 
-#define CCA_THRSH_ENABLE_ENERGY_D       0x140A
-#define CCA_THRSH_DISABLE_ENERGY_D      0xFFEF
-
 struct acx_energy_detection {
 	struct acx_header header;
 
 	/* The RX Clear Channel Assessment threshold in the PHY */
-	u16 rx_cca_threshold;
+	__le16 rx_cca_threshold;
 	u8 tx_energy_detection;
 	u8 pad;
 } __attribute__ ((packed));
 
-#define BCN_RX_TIMEOUT_DEF_VALUE        10000
-#define BROADCAST_RX_TIMEOUT_DEF_VALUE  20000
-#define RX_BROADCAST_IN_PS_DEF_VALUE    1
-#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4
-
 struct acx_beacon_broadcast {
 	struct acx_header header;
 
-	u16 beacon_rx_timeout;
-	u16 broadcast_timeout;
+	__le16 beacon_rx_timeout;
+	__le16 broadcast_timeout;
 
 	/* Enables receiving of broadcast packets in PS mode */
 	u8 rx_broadcast_in_ps;
@@ -619,8 +506,8 @@ struct acx_beacon_broadcast {
 struct acx_event_mask {
 	struct acx_header header;
 
-	u32 event_mask;
-	u32 high_event_mask; /* Unused */
+	__le32 event_mask;
+	__le32 high_event_mask; /* Unused */
 } __attribute__ ((packed));
 
 #define CFG_RX_FCS		BIT(2)
@@ -657,11 +544,15 @@ struct acx_event_mask {
 #define SCAN_TRIGGERED		BIT(2)
 #define SCAN_PRIORITY_HIGH	BIT(3)
 
+/* When set, disable HW encryption */
+#define DF_ENCRYPTION_DISABLE      0x01
+#define DF_SNIFF_MODE_ENABLE       0x80
+
 struct acx_feature_config {
 	struct acx_header header;
 
-	u32 options;
-	u32 data_flow_options;
+	__le32 options;
+	__le32 data_flow_options;
 } __attribute__ ((packed));
 
 struct acx_current_tx_power {
@@ -671,14 +562,6 @@ struct acx_current_tx_power {
 	u8  padding[3];
 } __attribute__ ((packed));
 
-enum acx_wake_up_event {
-	WAKE_UP_EVENT_BEACON_BITMAP	= 0x01, /* Wake on every Beacon*/
-	WAKE_UP_EVENT_DTIM_BITMAP	= 0x02,	/* Wake on every DTIM*/
-	WAKE_UP_EVENT_N_DTIM_BITMAP	= 0x04, /* Wake on every Nth DTIM */
-	WAKE_UP_EVENT_N_BEACONS_BITMAP	= 0x08, /* Wake on every Nth Beacon */
-	WAKE_UP_EVENT_BITS_MASK		= 0x0F
-};
-
 struct acx_wake_up_condition {
 	struct acx_header header;
 
@@ -693,7 +576,7 @@ struct acx_aid {
 	/*
 	 * To be set when associated with an AP.
 	 */
-	u16 aid;
+	__le16 aid;
 	u8 pad[2];
 } __attribute__ ((packed));
 
@@ -725,152 +608,152 @@ struct acx_ctsprotect {
 } __attribute__ ((packed));
 
 struct acx_tx_statistics {
-	u32 internal_desc_overflow;
+	__le32 internal_desc_overflow;
 }  __attribute__ ((packed));
 
 struct acx_rx_statistics {
-	u32 out_of_mem;
-	u32 hdr_overflow;
-	u32 hw_stuck;
-	u32 dropped;
-	u32 fcs_err;
-	u32 xfr_hint_trig;
-	u32 path_reset;
-	u32 reset_counter;
+	__le32 out_of_mem;
+	__le32 hdr_overflow;
+	__le32 hw_stuck;
+	__le32 dropped;
+	__le32 fcs_err;
+	__le32 xfr_hint_trig;
+	__le32 path_reset;
+	__le32 reset_counter;
 } __attribute__ ((packed));
 
 struct acx_dma_statistics {
-	u32 rx_requested;
-	u32 rx_errors;
-	u32 tx_requested;
-	u32 tx_errors;
+	__le32 rx_requested;
+	__le32 rx_errors;
+	__le32 tx_requested;
+	__le32 tx_errors;
 }  __attribute__ ((packed));
 
 struct acx_isr_statistics {
 	/* host command complete */
-	u32 cmd_cmplt;
+	__le32 cmd_cmplt;
 
 	/* fiqisr() */
-	u32 fiqs;
+	__le32 fiqs;
 
 	/* (INT_STS_ND & INT_TRIG_RX_HEADER) */
-	u32 rx_headers;
+	__le32 rx_headers;
 
 	/* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
-	u32 rx_completes;
+	__le32 rx_completes;
 
 	/* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
-	u32 rx_mem_overflow;
+	__le32 rx_mem_overflow;
 
 	/* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
-	u32 rx_rdys;
+	__le32 rx_rdys;
 
 	/* irqisr() */
-	u32 irqs;
+	__le32 irqs;
 
 	/* (INT_STS_ND & INT_TRIG_TX_PROC) */
-	u32 tx_procs;
+	__le32 tx_procs;
 
 	/* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
-	u32 decrypt_done;
+	__le32 decrypt_done;
 
 	/* (INT_STS_ND & INT_TRIG_DMA0) */
-	u32 dma0_done;
+	__le32 dma0_done;
 
 	/* (INT_STS_ND & INT_TRIG_DMA1) */
-	u32 dma1_done;
+	__le32 dma1_done;
 
 	/* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
-	u32 tx_exch_complete;
+	__le32 tx_exch_complete;
 
 	/* (INT_STS_ND & INT_TRIG_COMMAND) */
-	u32 commands;
+	__le32 commands;
 
 	/* (INT_STS_ND & INT_TRIG_RX_PROC) */
-	u32 rx_procs;
+	__le32 rx_procs;
 
 	/* (INT_STS_ND & INT_TRIG_PM_802) */
-	u32 hw_pm_mode_changes;
+	__le32 hw_pm_mode_changes;
 
 	/* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
-	u32 host_acknowledges;
+	__le32 host_acknowledges;
 
 	/* (INT_STS_ND & INT_TRIG_PM_PCI) */
-	u32 pci_pm;
+	__le32 pci_pm;
 
 	/* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
-	u32 wakeups;
+	__le32 wakeups;
 
 	/* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
-	u32 low_rssi;
+	__le32 low_rssi;
 } __attribute__ ((packed));
 
 struct acx_wep_statistics {
 	/* WEP address keys configured */
-	u32 addr_key_count;
+	__le32 addr_key_count;
 
 	/* default keys configured */
-	u32 default_key_count;
+	__le32 default_key_count;
 
-	u32 reserved;
+	__le32 reserved;
 
 	/* number of times that WEP key not found on lookup */
-	u32 key_not_found;
+	__le32 key_not_found;
 
 	/* number of times that WEP key decryption failed */
-	u32 decrypt_fail;
+	__le32 decrypt_fail;
 
 	/* WEP packets decrypted */
-	u32 packets;
+	__le32 packets;
 
 	/* WEP decrypt interrupts */
-	u32 interrupt;
+	__le32 interrupt;
 } __attribute__ ((packed));
 
 #define ACX_MISSED_BEACONS_SPREAD 10
 
 struct acx_pwr_statistics {
 	/* the amount of enters into power save mode (both PD & ELP) */
-	u32 ps_enter;
+	__le32 ps_enter;
 
 	/* the amount of enters into ELP mode */
-	u32 elp_enter;
+	__le32 elp_enter;
 
 	/* the amount of missing beacon interrupts to the host */
-	u32 missing_bcns;
+	__le32 missing_bcns;
 
 	/* the amount of wake on host-access times */
-	u32 wake_on_host;
+	__le32 wake_on_host;
 
 	/* the amount of wake on timer-expire */
-	u32 wake_on_timer_exp;
+	__le32 wake_on_timer_exp;
 
 	/* the number of packets that were transmitted with PS bit set */
-	u32 tx_with_ps;
+	__le32 tx_with_ps;
 
 	/* the number of packets that were transmitted with PS bit clear */
-	u32 tx_without_ps;
+	__le32 tx_without_ps;
 
 	/* the number of received beacons */
-	u32 rcvd_beacons;
+	__le32 rcvd_beacons;
 
 	/* the number of entering into PowerOn (power save off) */
-	u32 power_save_off;
+	__le32 power_save_off;
 
 	/* the number of entries into power save mode */
-	u16 enable_ps;
+	__le16 enable_ps;
 
 	/*
 	 * the number of exits from power save, not including failed PS
 	 * transitions
 	 */
-	u16 disable_ps;
+	__le16 disable_ps;
 
 	/*
 	 * the number of times the TSF counter was adjusted because
 	 * of drift
 	 */
-	u32 fix_tsf_ps;
+	__le32 fix_tsf_ps;
 
 	/* Gives statistics about the spread continuous missed beacons.
 	 * The 16 LSB are dedicated for the PS mode.
@@ -881,53 +764,53 @@ struct acx_pwr_statistics {
 	 * ...
 	 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
 	*/
-	u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
+	__le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
 
 	/* the number of beacons in awake mode */
-	u32 rcvd_awake_beacons;
+	__le32 rcvd_awake_beacons;
 } __attribute__ ((packed));
 
 struct acx_mic_statistics {
-	u32 rx_pkts;
-	u32 calc_failure;
+	__le32 rx_pkts;
+	__le32 calc_failure;
 } __attribute__ ((packed));
 
 struct acx_aes_statistics {
-	u32 encrypt_fail;
-	u32 decrypt_fail;
-	u32 encrypt_packets;
-	u32 decrypt_packets;
-	u32 encrypt_interrupt;
-	u32 decrypt_interrupt;
+	__le32 encrypt_fail;
+	__le32 decrypt_fail;
+	__le32 encrypt_packets;
+	__le32 decrypt_packets;
+	__le32 encrypt_interrupt;
+	__le32 decrypt_interrupt;
 } __attribute__ ((packed));
 
 struct acx_event_statistics {
-	u32 heart_beat;
-	u32 calibration;
-	u32 rx_mismatch;
-	u32 rx_mem_empty;
-	u32 rx_pool;
-	u32 oom_late;
-	u32 phy_transmit_error;
-	u32 tx_stuck;
+	__le32 heart_beat;
+	__le32 calibration;
+	__le32 rx_mismatch;
+	__le32 rx_mem_empty;
+	__le32 rx_pool;
+	__le32 oom_late;
+	__le32 phy_transmit_error;
+	__le32 tx_stuck;
 } __attribute__ ((packed));
 
 struct acx_ps_statistics {
-	u32 pspoll_timeouts;
-	u32 upsd_timeouts;
-	u32 upsd_max_sptime;
-	u32 upsd_max_apturn;
-	u32 pspoll_max_apturn;
-	u32 pspoll_utilization;
-	u32 upsd_utilization;
+	__le32 pspoll_timeouts;
+	__le32 upsd_timeouts;
+	__le32 upsd_max_sptime;
+	__le32 upsd_max_apturn;
+	__le32 pspoll_max_apturn;
+	__le32 pspoll_utilization;
+	__le32 upsd_utilization;
 } __attribute__ ((packed));
 
 struct acx_rxpipe_statistics {
-	u32 rx_prep_beacon_drop;
-	u32 descr_host_int_trig_rx_data;
-	u32 beacon_buffer_thres_host_int_trig_rx_data;
-	u32 missed_beacon_host_int_trig_rx_data;
-	u32 tx_xfr_host_int_trig_rx_data;
+	__le32 rx_prep_beacon_drop;
+	__le32 descr_host_int_trig_rx_data;
+	__le32 beacon_buffer_thres_host_int_trig_rx_data;
+	__le32 missed_beacon_host_int_trig_rx_data;
+	__le32 tx_xfr_host_int_trig_rx_data;
 } __attribute__ ((packed));
 
 struct acx_statistics {
@@ -946,13 +829,8 @@ struct acx_statistics {
 	struct acx_rxpipe_statistics rxpipe;
 } __attribute__ ((packed));
 
-#define ACX_MAX_RATE_CLASSES       8
-#define ACX_RATE_MASK_UNSPECIFIED  0
-#define ACX_RATE_MASK_ALL          0x1eff
-#define ACX_RATE_RETRY_LIMIT       10
-
 struct acx_rate_class {
-	u32 enabled_rates;
+	__le32 enabled_rates;
 	u8 short_retry_limit;
 	u8 long_retry_limit;
 	u8 aflags;
@@ -962,47 +840,20 @@ struct acx_rate_class {
 struct acx_rate_policy {
 	struct acx_header header;
 
-	u32 rate_class_cnt;
-	struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES];
+	__le32 rate_class_cnt;
+	struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
 } __attribute__ ((packed));
 
-#define WL1271_ACX_AC_COUNT 4
-
 struct acx_ac_cfg {
 	struct acx_header header;
 	u8 ac;
 	u8 cw_min;
-	u16 cw_max;
+	__le16 cw_max;
 	u8 aifsn;
 	u8 reserved;
-	u16 tx_op_limit;
+	__le16 tx_op_limit;
 } __attribute__ ((packed));
 
-enum wl1271_acx_ac {
-	WL1271_ACX_AC_BE = 0,
-	WL1271_ACX_AC_BK = 1,
-	WL1271_ACX_AC_VI = 2,
-	WL1271_ACX_AC_VO = 3,
-	WL1271_ACX_AC_CTS2SELF = 4,
-	WL1271_ACX_AC_ANY_TID = 0x1F,
-	WL1271_ACX_AC_INVALID = 0xFF,
-};
-
-enum wl1271_acx_ps_scheme {
-	WL1271_ACX_PS_SCHEME_LEGACY = 0,
-	WL1271_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
-	WL1271_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
-	WL1271_ACX_PS_SCHEME_SAPSD = 3,
-};
-
-enum wl1271_acx_ack_policy {
-	WL1271_ACX_ACK_POLICY_LEGACY = 0,
-	WL1271_ACX_ACK_POLICY_NO_ACK = 1,
-	WL1271_ACX_ACK_POLICY_BLOCK = 2,
-};
-
-#define WL1271_ACX_TID_COUNT 7
-
 struct acx_tid_config {
 	struct acx_header header;
 	u8 queue_id;
@@ -1011,22 +862,19 @@ struct acx_tid_config {
 	u8 ps_scheme;
 	u8 ack_policy;
 	u8 padding[3];
-	u32 apsd_conf[2];
+	__le32 apsd_conf[2];
 } __attribute__ ((packed));
 
 struct acx_frag_threshold {
 	struct acx_header header;
-	u16 frag_threshold;
+	__le16 frag_threshold;
 	u8 padding[2];
 } __attribute__ ((packed));
 
-#define WL1271_ACX_TX_COMPL_TIMEOUT   5
-#define WL1271_ACX_TX_COMPL_THRESHOLD 5
-
 struct acx_tx_config_options {
 	struct acx_header header;
-	u16 tx_compl_timeout;     /* msec */
-	u16 tx_compl_threshold;   /* number of packets */
+	__le16 tx_compl_timeout;     /* msec */
+	__le16 tx_compl_threshold;   /* number of packets */
 } __attribute__ ((packed));
 
 #define ACX_RX_MEM_BLOCKS     64
@@ -1041,79 +889,87 @@ struct wl1271_acx_config_memory {
 	u8 tx_min_mem_block_num;
 	u8 num_stations;
 	u8 num_ssid_profiles;
-	u32 total_tx_descriptors;
+	__le32 total_tx_descriptors;
 } __attribute__ ((packed));
 
 struct wl1271_acx_mem_map {
 	struct acx_header header;
 
-	void *code_start;
-	void *code_end;
+	__le32 code_start;
+	__le32 code_end;
 
-	void *wep_defkey_start;
-	void *wep_defkey_end;
+	__le32 wep_defkey_start;
+	__le32 wep_defkey_end;
 
-	void *sta_table_start;
-	void *sta_table_end;
+	__le32 sta_table_start;
+	__le32 sta_table_end;
 
-	void *packet_template_start;
-	void *packet_template_end;
+	__le32 packet_template_start;
+	__le32 packet_template_end;
 
 	/* Address of the TX result interface (control block) */
-	u32 tx_result;
-	u32 tx_result_queue_start;
+	__le32 tx_result;
+	__le32 tx_result_queue_start;
 
-	void *queue_memory_start;
-	void *queue_memory_end;
+	__le32 queue_memory_start;
+	__le32 queue_memory_end;
 
-	u32 packet_memory_pool_start;
-	u32 packet_memory_pool_end;
+	__le32 packet_memory_pool_start;
+	__le32 packet_memory_pool_end;
 
-	void *debug_buffer1_start;
-	void *debug_buffer1_end;
+	__le32 debug_buffer1_start;
+	__le32 debug_buffer1_end;
 
-	void *debug_buffer2_start;
-	void *debug_buffer2_end;
+	__le32 debug_buffer2_start;
+	__le32 debug_buffer2_end;
 
 	/* Number of blocks FW allocated for TX packets */
-	u32 num_tx_mem_blocks;
+	__le32 num_tx_mem_blocks;
 
 	/* Number of blocks FW allocated for RX packets */
-	u32 num_rx_mem_blocks;
+	__le32 num_rx_mem_blocks;
 
 	/* the following 4 fields are valid in SLAVE mode only */
 	u8 *tx_cbuf;
 	u8 *rx_cbuf;
-	void *rx_ctrl;
-	void *tx_ctrl;
+	__le32 rx_ctrl;
+	__le32 tx_ctrl;
 } __attribute__ ((packed));
 
-enum wl1271_acx_rx_queue_type {
-	RX_QUEUE_TYPE_RX_LOW_PRIORITY,    /* All except the high priority */
-	RX_QUEUE_TYPE_RX_HIGH_PRIORITY,   /* Management and voice packets */
-	RX_QUEUE_TYPE_NUM,
-	RX_QUEUE_TYPE_MAX = USHORT_MAX
-};
-
-#define WL1271_RX_INTR_THRESHOLD_DEF  0       /* no pacing, send interrupt on
-					       * every event */
-#define WL1271_RX_INTR_THRESHOLD_MIN  0
-#define WL1271_RX_INTR_THRESHOLD_MAX  15
-
-#define WL1271_RX_INTR_TIMEOUT_DEF    5
-#define WL1271_RX_INTR_TIMEOUT_MIN    1
-#define WL1271_RX_INTR_TIMEOUT_MAX    100
-
 struct wl1271_acx_rx_config_opt {
 	struct acx_header header;
 
-	u16 mblk_threshold;
-	u16 threshold;
-	u16 timeout;
+	__le16 mblk_threshold;
+	__le16 threshold;
+	__le16 timeout;
 	u8 queue_type;
 	u8 reserved;
 } __attribute__ ((packed));
 
+
+struct wl1271_acx_bet_enable {
+	struct acx_header header;
+
+	u8 enable;
+	u8 max_consecutive;
+	u8 padding[2];
+} __attribute__ ((packed));
+
+#define ACX_IPV4_VERSION 4
+#define ACX_IPV6_VERSION 6
+#define ACX_IPV4_ADDR_SIZE 4
+struct wl1271_acx_arp_filter {
+	struct acx_header header;
+	u8 version;         /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
+	u8 enable;          /* 1 to enable ARP filtering, 0 to disable */
+	u8 padding[2];
+	u8 address[16];     /* The configured device IP address - all ARP
+			       requests directed to this IP address will pass
+			       through. For IPv4, the first four bytes are
+			       used. */
+} __attribute__((packed));
+
+
 enum {
 	ACX_WAKE_UP_CONDITIONS      = 0x0002,
 	ACX_MEM_CFG                 = 0x0003,
@@ -1170,6 +1026,9 @@ enum {
 	ACX_PEER_HT_CAP             = 0x0057,
 	ACX_HT_BSS_OPERATION        = 0x0058,
 	ACX_COEX_ACTIVITY           = 0x0059,
+	ACX_SET_SMART_REFLEX_DEBUG  = 0x005A,
+	ACX_SET_SMART_REFLEX_STATE  = 0x005B,
+	ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
 	DOT11_RX_MSDU_LIFE_TIME     = 0x1004,
 	DOT11_CUR_TX_PWR            = 0x100D,
 	DOT11_RX_DOT11_MODE         = 0x1012,
@@ -1182,23 +1041,24 @@ enum {
 };
 
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
-				  u8 listen_interval);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
 int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
 int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len);
 int wl1271_acx_tx_power(struct wl1271 *wl, int power);
 int wl1271_acx_feature_cfg(struct wl1271 *wl);
 int wl1271_acx_mem_map(struct wl1271 *wl,
 		       struct acx_header *mem_map, size_t len);
-int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time);
+int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
 int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
 int wl1271_acx_pd_threshold(struct wl1271 *wl);
 int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
+				 void *mc_list, u32 mc_list_len);
 int wl1271_acx_service_period_timeout(struct wl1271 *wl);
 int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
 int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl);
 int wl1271_acx_sg_enable(struct wl1271 *wl);
 int wl1271_acx_sg_cfg(struct wl1271 *wl);
 int wl1271_acx_cca_threshold(struct wl1271 *wl);
@@ -1207,9 +1067,9 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
 int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
 int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
 int wl1271_acx_cts_protect(struct wl1271 *wl,
-			    enum acx_ctsprotect_type ctsprotect);
+			   enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
 int wl1271_acx_ac_cfg(struct wl1271 *wl);
 int wl1271_acx_tid_cfg(struct wl1271 *wl);
 int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1217,5 +1077,9 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl);
 int wl1271_acx_mem_cfg(struct wl1271 *wl);
 int wl1271_acx_init_mem_config(struct wl1271 *wl);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
+int wl1271_acx_smart_reflex(struct wl1271 *wl);
+int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
+			     u8 version);
 
 #endif /* __WL1271_ACX_H__ */
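The switch from plain u16/u32 fields to __le16/__le32 in these ACX structures
means every host-side access has to convert explicitly, as the wl1271_acx.c
hunks above already do with cpu_to_le*() on writes and le*_to_cpu() on reads.
A minimal sketch of that discipline, assuming nothing beyond the structures
defined in this header:

/* Sketch only: fill a little-endian ACX field so that sparse
 * (make C=1 CF=-D__CHECK_ENDIAN__) can check the conversion. */
static void acx_set_aid(struct acx_aid *acx_aid, u16 aid)
{
	acx_aid->aid = cpu_to_le16(aid);	/* host -> firmware byte order */
}

Reads go the other way, e.g. le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks)
as done in wl1271_acx_init_mem_config() above.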
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 8228ef474a7e..b7c96454cca3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -39,6 +39,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
 			.start = REGISTERS_BASE,
 			.size  = 0x00008800
 		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
 	},
 
 	[PART_WORK] = {
@@ -48,7 +56,15 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
 		},
 		.reg = {
 			.start = REGISTERS_BASE,
-			.size  = 0x0000b000
+			.size  = 0x0000a000
+		},
+		.mem2 = {
+			.start = 0x003004f8,
+			.size  = 0x00000004
+		},
+		.mem3 = {
+			.start = 0x00040404,
+			.size  = 0x00000000
 		},
 	},
 
@@ -60,6 +76,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
 		.reg = {
 			.start = DRPW_BASE,
 			.size  = 0x00006000
+		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
 		}
 	}
 };
@@ -69,19 +93,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
 	u32 cpu_ctrl;
 
 	/* 10.5.0 run the firmware (I) */
-	cpu_ctrl = wl1271_reg_read32(wl, ACX_REG_ECPU_CONTROL);
+	cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
 
 	/* 10.5.1 run the firmware (II) */
 	cpu_ctrl |= flag;
-	wl1271_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
+	wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
 }
 
 static void wl1271_boot_fw_version(struct wl1271 *wl)
 {
 	struct wl1271_static_data static_data;
 
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr,
-			    &static_data, sizeof(static_data));
+	wl1271_spi_read(wl, wl->cmd_box_addr,
+			&static_data, sizeof(static_data), false);
 
 	strncpy(wl->chip.fw_ver, static_data.fw_version,
 		sizeof(wl->chip.fw_ver));
@@ -93,8 +117,9 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
 static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
 					     size_t fw_data_len, u32 dest)
 {
+	struct wl1271_partition_set partition;
 	int addr, chunk_num, partition_limit;
-	u8 *p;
+	u8 *p, *chunk;
 
 	/* whal_FwCtrl_LoadFwImageSm() */
 
@@ -103,16 +128,20 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
 	wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
 		     fw_data_len, CHUNK_SIZE);
 
-
 	if ((fw_data_len % 4) != 0) {
 		wl1271_error("firmware length not multiple of four");
 		return -EIO;
 	}
 
-	wl1271_set_partition(wl, dest,
-			     part_table[PART_DOWN].mem.size,
-			     part_table[PART_DOWN].reg.start,
-			     part_table[PART_DOWN].reg.size);
+	chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
+	if (!chunk) {
+		wl1271_error("allocation for firmware upload chunk failed");
+		return -ENOMEM;
+	}
+
+	memcpy(&partition, &part_table[PART_DOWN], sizeof(partition));
+	partition.mem.start = dest;
+	wl1271_set_partition(wl, &partition);
 
 	/* 10.1 set partition limit and chunk num */
 	chunk_num = 0;
@@ -125,21 +154,17 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
 			addr = dest + chunk_num * CHUNK_SIZE;
 			partition_limit = chunk_num * CHUNK_SIZE +
 				part_table[PART_DOWN].mem.size;
-
-			/* FIXME: Over 80 chars! */
-			wl1271_set_partition(wl,
-					     addr,
-					     part_table[PART_DOWN].mem.size,
-					     part_table[PART_DOWN].reg.start,
-					     part_table[PART_DOWN].reg.size);
+			partition.mem.start = addr;
+			wl1271_set_partition(wl, &partition);
 		}
 
 		/* 10.3 upload the chunk */
 		addr = dest + chunk_num * CHUNK_SIZE;
 		p = buf + chunk_num * CHUNK_SIZE;
+		memcpy(chunk, p, CHUNK_SIZE);
 		wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
 			     p, addr);
-		wl1271_spi_mem_write(wl, addr, p, CHUNK_SIZE);
+		wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
 
 		chunk_num++;
 	}
@@ -147,28 +172,31 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
 	/* 10.4 upload the last chunk */
 	addr = dest + chunk_num * CHUNK_SIZE;
 	p = buf + chunk_num * CHUNK_SIZE;
+	memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
 	wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
 		     fw_data_len % CHUNK_SIZE, p, addr);
-	wl1271_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE);
+	wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
 
+	kfree(chunk);
 	return 0;
 }
 
 static int wl1271_boot_upload_firmware(struct wl1271 *wl)
 {
 	u32 chunks, addr, len;
+	int ret = 0;
 	u8 *fw;
 
 	fw = wl->fw;
-	chunks = be32_to_cpup((u32 *) fw);
+	chunks = be32_to_cpup((__be32 *) fw);
 	fw += sizeof(u32);
 
 	wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
 
 	while (chunks--) {
-		addr = be32_to_cpup((u32 *) fw);
+		addr = be32_to_cpup((__be32 *) fw);
 		fw += sizeof(u32);
-		len = be32_to_cpup((u32 *) fw);
+		len = be32_to_cpup((__be32 *) fw);
 		fw += sizeof(u32);
 
 		if (len > 300000) {
@@ -177,11 +205,13 @@ static int wl1271_boot_upload_firmware(struct wl1271 *wl)
 		}
 		wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
 			     chunks, addr, len);
-		wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
+		ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
+		if (ret != 0)
+			break;
 		fw += len;
 	}
 
-	return 0;
+	return ret;
 }
 
 static int wl1271_boot_upload_nvs(struct wl1271 *wl)
@@ -235,7 +265,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
 			wl1271_debug(DEBUG_BOOT,
 				     "nvs burst write 0x%x: 0x%x",
 				     dest_addr, val);
-			wl1271_reg_write32(wl, dest_addr, val);
+			wl1271_spi_write32(wl, dest_addr, val);
 
 			nvs_ptr += 4;
 			dest_addr += 4;
@@ -253,20 +283,18 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
 	/* FIXME: The driver sets the partition here, but this is not needed,
 	   since it sets to the same one as currently in use */
 	/* Now we must set the partition correctly */
-	wl1271_set_partition(wl,
-			     part_table[PART_WORK].mem.start,
-			     part_table[PART_WORK].mem.size,
-			     part_table[PART_WORK].reg.start,
-			     part_table[PART_WORK].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_WORK]);
 
 	/* Copy the NVS tables to a new block to ensure alignment */
 	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
+	if (!nvs_aligned)
+		return -ENOMEM;
 
 	/* And finally we upload the NVS tables */
 	/* FIXME: In wl1271, we upload everything at once.
 	   No endianness handling needed here?! The ref driver doesn't do
 	   anything about it at this point */
-	wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len);
+	wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
 
 	kfree(nvs_aligned);
 	return 0;
@@ -275,9 +303,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
 static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
 {
 	enable_irq(wl->irq);
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
 			   WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
-	wl1271_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
+	wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
 }
 
 static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -286,12 +314,13 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
 	u32 boot_data;
 
 	/* perform soft reset */
-	wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+	wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
+			   ACX_SLV_SOFT_RESET_BIT);
 
 	/* SOFT_RESET is self clearing */
 	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
 	while (1) {
-		boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET);
+		boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
 		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
 		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
 			break;
@@ -307,10 +336,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
 	}
 
 	/* disable Rx/Tx */
-	wl1271_reg_write32(wl, ENABLE, 0x0);
+	wl1271_spi_write32(wl, ENABLE, 0x0);
 
 	/* disable auto calibration on start*/
-	wl1271_reg_write32(wl, SPARE_A2, 0xffff);
+	wl1271_spi_write32(wl, SPARE_A2, 0xffff);
 
 	return 0;
 }
@@ -322,7 +351,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
 
 	wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
 
-	chip_id = wl1271_reg_read32(wl, CHIP_ID_B);
+	chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
 
 	wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
 
@@ -335,7 +364,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
 	loop = 0;
 	while (loop++ < INIT_LOOP) {
 		udelay(INIT_LOOP_DELAY);
-		interrupt = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+		interrupt = wl1271_spi_read32(wl,
+					      ACX_REG_INTERRUPT_NO_CLEAR);
 
 		if (interrupt == 0xffffffff) {
 			wl1271_error("error reading hardware complete "
@@ -344,30 +374,26 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
 		}
 		/* check that ACX_INTR_INIT_COMPLETE is enabled */
 		else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
-			wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
+			wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
 					   WL1271_ACX_INTR_INIT_COMPLETE);
 			break;
 		}
 	}
 
-	if (loop >= INIT_LOOP) {
+	if (loop > INIT_LOOP) {
 		wl1271_error("timeout waiting for the hardware to "
 			     "complete initialization");
 		return -EIO;
 	}
 
 	/* get hardware config command mail box */
-	wl->cmd_box_addr = wl1271_reg_read32(wl, REG_COMMAND_MAILBOX_PTR);
+	wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
 
 	/* get hardware config event mail box */
-	wl->event_box_addr = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
+	wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
 
 	/* set the working partition to its "running" mode offset */
-	wl1271_set_partition(wl,
-			     part_table[PART_WORK].mem.start,
-			     part_table[PART_WORK].mem.size,
-			     part_table[PART_WORK].reg.start,
-			     part_table[PART_WORK].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_WORK]);
 
 	wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
 		     wl->cmd_box_addr, wl->event_box_addr);
@@ -379,11 +405,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
 	 * ready to receive event from the command mailbox
 	 */
 
-	/* enable gpio interrupts */
-	wl1271_boot_enable_interrupts(wl);
-
-	/* unmask all mbox events  */
-	wl->event_mask = 0xffffffff;
+	/* unmask required mbox events  */
+	wl->event_mask = BSS_LOSE_EVENT_ID |
+		SCAN_COMPLETE_EVENT_ID |
+		PS_REPORT_EVENT_ID;
 
 	ret = wl1271_event_unmask(wl);
 	if (ret < 0) {
@@ -399,34 +424,13 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
 
 static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
 {
-	u32 polarity, status, i;
-
-	wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
-	wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_READ);
-
-	/* Wait until the command is complete (ie. bit 18 is set) */
-	for (i = 0; i < OCP_CMD_LOOP; i++) {
-		polarity = wl1271_reg_read32(wl, OCP_DATA_READ);
-		if (polarity & OCP_READY_MASK)
-			break;
-	}
-	if (i == OCP_CMD_LOOP) {
-		wl1271_error("OCP command timeout!");
-		return -EIO;
-	}
+	u32 polarity;
 
-	status = polarity & OCP_STATUS_MASK;
-	if (status != OCP_STATUS_OK) {
-		wl1271_error("OCP command failed (%d)", status);
-		return -EIO;
-	}
+	polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
 
 	/* We use HIGH polarity, so unset the LOW bit */
 	polarity &= ~POLARITY_LOW;
-
-	wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
-	wl1271_reg_write32(wl, OCP_DATA_WRITE, polarity);
-	wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_WRITE);
+	wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
 
 	return 0;
 }
@@ -436,16 +440,32 @@ int wl1271_boot(struct wl1271 *wl)
 	int ret = 0;
 	u32 tmp, clk, pause;
 
-	if (REF_CLOCK == 0 || REF_CLOCK == 2)
-		/* ref clk: 19.2/38.4 */
+	if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
+		/* ref clk: 19.2/38.4/38.4-XTAL */
 		clk = 0x3;
 	else if (REF_CLOCK == 1 || REF_CLOCK == 3)
 		/* ref clk: 26/52 */
 		clk = 0x5;
 
-	wl1271_reg_write32(wl, PLL_PARAMETERS, clk);
+	if (REF_CLOCK != 0) {
+		u16 val;
+		/* Set clock type */
+		val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
+		val &= FREF_CLK_TYPE_BITS;
+		val |= CLK_REQ_PRCM;
+		wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+	} else {
+		u16 val;
+		/* Set clock polarity */
+		val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
+		val &= FREF_CLK_POLARITY_BITS;
+		val |= CLK_REQ_OUTN_SEL;
+		wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+	}
+
+	wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
 
-	pause = wl1271_reg_read32(wl, PLL_PARAMETERS);
+	pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
 
 	wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
 
@@ -454,39 +474,31 @@ int wl1271_boot(struct wl1271 *wl)
 					   * 0x3ff (magic number ).  How does
 					   * this work?! */
 	pause |= WU_COUNTER_PAUSE_VAL;
-	wl1271_reg_write32(wl, WU_COUNTER_PAUSE, pause);
+	wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
 
 	/* Continue the ELP wake up sequence */
-	wl1271_reg_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+	wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
 	udelay(500);
 
-	wl1271_set_partition(wl,
-			     part_table[PART_DRPW].mem.start,
-			     part_table[PART_DRPW].mem.size,
-			     part_table[PART_DRPW].reg.start,
-			     part_table[PART_DRPW].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_DRPW]);
 
 	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
 	   to be used by DRPw FW. The RTRIM value will be added by the FW
 	   before taking DRPw out of reset */
 
 	wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
-	clk = wl1271_reg_read32(wl, DRPW_SCRATCH_START);
+	clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
 
 	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
 
 	/* 2 */
 	clk |= (REF_CLOCK << 1) << 4;
-	wl1271_reg_write32(wl, DRPW_SCRATCH_START, clk);
+	wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
 
-	wl1271_set_partition(wl,
-			     part_table[PART_WORK].mem.start,
-			     part_table[PART_WORK].mem.size,
-			     part_table[PART_WORK].reg.start,
-			     part_table[PART_WORK].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_WORK]);
 
 	/* Disable interrupts */
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
 
 	ret = wl1271_boot_soft_reset(wl);
 	if (ret < 0)
@@ -501,21 +513,22 @@ int wl1271_boot(struct wl1271 *wl)
 	 * ACX_EEPROMLESS_IND_REG */
 	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
 
-	wl1271_reg_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
+	wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
+			   ACX_EEPROMLESS_IND_REG);
 
-	tmp = wl1271_reg_read32(wl, CHIP_ID_B);
+	tmp = wl1271_spi_read32(wl, CHIP_ID_B);
 
 	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
 
 	/* 6. read the EEPROM parameters */
-	tmp = wl1271_reg_read32(wl, SCR_PAD2);
+	tmp = wl1271_spi_read32(wl, SCR_PAD2);
 
 	ret = wl1271_boot_write_irq_polarity(wl);
 	if (ret < 0)
 		goto out;
 
 	/* FIXME: Need to check whether this is really what we want */
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
 			   WL1271_ACX_ALL_EVENTS_VECTOR);
 
 	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
@@ -530,6 +543,9 @@ int wl1271_boot(struct wl1271 *wl)
 	if (ret < 0)
 		goto out;
 
+	/* Enable firmware interrupts now */
+	wl1271_boot_enable_interrupts(wl);
+
 	/* set the wl1271 default filters */
 	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
 	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
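
As a side note on the hunk above: the boot path now unmasks only the mailbox
events the driver actually handles, and enables firmware interrupts only once
the firmware is up. A minimal sketch of that ordering, reusing symbols from
this patch (not verbatim driver code; the function name here is hypothetical):

/* Sketch only: mirrors the ordering established in wl1271_boot_run_firmware()
 * and wl1271_boot() above. */
static int wl1271_boot_finish_sketch(struct wl1271 *wl)
{
	int ret;

	/* unmask only the mailbox events the driver handles */
	wl->event_mask = BSS_LOSE_EVENT_ID |
			 SCAN_COMPLETE_EVENT_ID |
			 PS_REPORT_EVENT_ID;

	ret = wl1271_event_unmask(wl);
	if (ret < 0)
		return ret;

	/* only now allow the firmware to raise interrupts */
	wl1271_boot_enable_interrupts(wl);

	return 0;
}
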
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index b0d8fb46a439..412443ee655a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -50,23 +50,17 @@ struct wl1271_static_data {
 #define WU_COUNTER_PAUSE_VAL 0x3FF
 #define WELP_ARM_COMMAND_VAL 0x4
 
-#define OCP_CMD_LOOP  32
-
-#define OCP_CMD_WRITE 0x1
-#define OCP_CMD_READ  0x2
-
-#define OCP_READY_MASK  BIT(18)
-#define OCP_STATUS_MASK (BIT(16) | BIT(17))
-
-#define OCP_STATUS_NO_RESP    0x00000
-#define OCP_STATUS_OK         0x10000
-#define OCP_STATUS_REQ_FAILED 0x20000
-#define OCP_STATUS_RESP_ERROR 0x30000
-
-#define OCP_REG_POLARITY 0x30032
+#define OCP_REG_POLARITY     0x0064
+#define OCP_REG_CLK_TYPE     0x0448
+#define OCP_REG_CLK_POLARITY 0x0cb2
 
 #define CMD_MBOX_ADDRESS 0x407B4
 
 #define POLARITY_LOW BIT(1)
 
+#define FREF_CLK_TYPE_BITS     0xfffffe7f
+#define CLK_REQ_PRCM           0x100
+#define FREF_CLK_POLARITY_BITS 0xfffff8ff
+#define CLK_REQ_OUTN_SEL       0x700
+
 #endif
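
The OCP register offsets and FREF_CLK_* masks above are consumed through plain
read-modify-write cycles. A hedged sketch of that pattern, assuming the
wl1271_top_reg_read()/wl1271_top_reg_write() helpers used in wl1271_boot.c
hide the OCP handshake that used to be open-coded (the function name here is
hypothetical):

/* Sketch: select the FREF clock source with the masks defined above. */
static void wl1271_select_fref_clk_sketch(struct wl1271 *wl, bool use_prcm)
{
	u16 val;

	if (use_prcm) {
		val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
		val &= FREF_CLK_TYPE_BITS;	/* clear the clock-type field */
		val |= CLK_REQ_PRCM;		/* set the PRCM clock request */
		wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
	} else {
		val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
		val &= FREF_CLK_POLARITY_BITS;	/* clear the polarity field */
		val |= CLK_REQ_OUTN_SEL;	/* set the OUTN selection bits */
		wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
	}
}
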
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 2a4351ff54dc..886a9bc39cc1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -42,26 +42,28 @@
  * @buf: buffer containing the command, must work with dma
  * @len: length of the buffer
  */
-int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
+		    size_t res_len)
 {
 	struct wl1271_cmd_header *cmd;
 	unsigned long timeout;
 	u32 intr;
 	int ret = 0;
+	u16 status;
 
 	cmd = buf;
-	cmd->id = id;
+	cmd->id = cpu_to_le16(id);
 	cmd->status = 0;
 
 	WARN_ON(len % 4 != 0);
 
-	wl1271_spi_mem_write(wl, wl->cmd_box_addr, buf, len);
+	wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
 
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
 
 	timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
 
-	intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+	intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
 	while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
 		if (time_after(jiffies, timeout)) {
 			wl1271_error("command complete timeout");
@@ -71,17 +73,28 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len)
 
 		msleep(1);
 
-		intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+		intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
 	}
 
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
+	/* read back the status code of the command */
+	if (res_len == 0)
+		res_len = sizeof(struct wl1271_cmd_header);
+	wl1271_spi_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+
+	status = le16_to_cpu(cmd->status);
+	if (status != CMD_STATUS_SUCCESS) {
+		wl1271_error("command execute failure %d", status);
+		ret = -EIO;
+	}
+
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
 			   WL1271_ACX_INTR_CMD_COMPLETE);
 
 out:
 	return ret;
 }
 
-int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
+static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
 {
 	struct wl1271_cmd_cal_channel_tune *cmd;
 	int ret = 0;
@@ -104,7 +117,7 @@ int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
 	return ret;
 }
 
-int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
+static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
 {
 	struct wl1271_cmd_cal_update_ref_point *cmd;
 	int ret = 0;
@@ -129,7 +142,7 @@ int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
 	return ret;
 }
 
-int wl1271_cmd_cal_p2g(struct wl1271 *wl)
+static int wl1271_cmd_cal_p2g(struct wl1271 *wl)
 {
 	struct wl1271_cmd_cal_p2g *cmd;
 	int ret = 0;
@@ -150,7 +163,7 @@ int wl1271_cmd_cal_p2g(struct wl1271 *wl)
 	return ret;
 }
 
-int wl1271_cmd_cal(struct wl1271 *wl)
+static int wl1271_cmd_cal(struct wl1271 *wl)
 {
 	/*
 	 * FIXME: we must make sure that we're not sleeping when calibration
@@ -175,11 +188,116 @@ int wl1271_cmd_cal(struct wl1271 *wl)
 	return ret;
 }
 
-int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
-		    u16 beacon_interval, u8 wait)
+int wl1271_cmd_general_parms(struct wl1271 *wl)
+{
+	struct wl1271_general_parms_cmd *gen_parms;
+	struct conf_general_parms *g = &wl->conf.init.genparam;
+	int ret;
+
+	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+	if (!gen_parms)
+		return -ENOMEM;
+
+	gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
+
+	gen_parms->ref_clk = g->ref_clk;
+	gen_parms->settling_time = g->settling_time;
+	gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
+	gen_parms->dc2dcmode = g->dc2dcmode;
+	gen_parms->single_dual_band = g->single_dual_band;
+	gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
+	gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
+	gen_parms->settings = g->settings;
+
+	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
+	if (ret < 0)
+		wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
+
+	kfree(gen_parms);
+	return ret;
+}
+
+int wl1271_cmd_radio_parms(struct wl1271 *wl)
+{
+	struct wl1271_radio_parms_cmd *radio_parms;
+	struct conf_radio_parms *r = &wl->conf.init.radioparam;
+	int i, ret;
+
+	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
+	if (!radio_parms)
+		return -ENOMEM;
+
+	radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+
+	/* Static radio parameters */
+	radio_parms->rx_trace_loss = r->rx_trace_loss;
+	radio_parms->tx_trace_loss = r->tx_trace_loss;
+	memcpy(radio_parms->rx_rssi_and_proc_compens,
+	       r->rx_rssi_and_proc_compens,
+	       CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
+
+	memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->rx_rssi_and_proc_compens_5,
+	       r->rx_rssi_and_proc_compens_5,
+	       CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
+
+	/* Dynamic radio parameters */
+	radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
+	radio_parms->tx_ref_power = r->tx_ref_power;
+	radio_parms->tx_offset_db = r->tx_offset_db;
+
+	memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+
+	memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
+	       CONF_NUMBER_OF_CHANNELS_2_4);
+	memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
+	       CONF_NUMBER_OF_CHANNELS_2_4);
+	memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
+
+	radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
+
+	for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
+		radio_parms->tx_ref_pd_voltage_5[i] =
+			cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
+	memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->tx_rate_limits_normal_5,
+	       r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_rate_limits_degraded_5,
+	       r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_channel_limits_ofdm_5,
+	       r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
+	memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->rx_fem_insertion_loss_5,
+	       r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
+
+	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
+		    radio_parms, sizeof(*radio_parms));
+
+	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
+	if (ret < 0)
+		wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
+
+	kfree(radio_parms);
+	return ret;
+}
+
+int wl1271_cmd_join(struct wl1271 *wl)
 {
 	static bool do_cal = true;
-	unsigned long timeout;
 	struct wl1271_cmd_join *join;
 	int ret, i;
 	u8 *bssid;
@@ -193,6 +311,18 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
 			do_cal = false;
 	}
 
+	/* FIXME: This is a workaround, because with the current stack, we
+	 * cannot know when we have disassociated.  So, if we have already
+	 * joined, we disconnect before joining again. */
+	if (wl->joined) {
+		ret = wl1271_cmd_disconnect(wl);
+		if (ret < 0) {
+			wl1271_error("failed to disconnect before rejoining");
+			goto out;
+		}
+
+		wl->joined = false;
+	}
 
 	join = kzalloc(sizeof(*join), GFP_KERNEL);
 	if (!join) {
@@ -207,15 +337,34 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
 	for (i = 0; i < ETH_ALEN; i++)
 		bssid[i] = wl->bssid[ETH_ALEN - i - 1];
 
-	join->rx_config_options = wl->rx_config;
-	join->rx_filter_options = wl->rx_filter;
+	join->rx_config_options = cpu_to_le32(wl->rx_config);
+	join->rx_filter_options = cpu_to_le32(wl->rx_filter);
+	join->bss_type = wl->bss_type;
 
-	join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
-		RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
+	/*
+	 * FIXME: disable temporarily all filters because after commit
+	 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
+	 * association. The filter logic needs to be implemented properly
+	 * and once that is done, this hack can be removed.
+	 */
+	join->rx_config_options = cpu_to_le32(0);
+	join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
+
+	if (wl->band == IEEE80211_BAND_2GHZ)
+		join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS   |
+						   CONF_HW_BIT_RATE_2MBPS   |
+						   CONF_HW_BIT_RATE_5_5MBPS |
+						   CONF_HW_BIT_RATE_11MBPS);
+	else {
+		join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
+		join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS  |
+						   CONF_HW_BIT_RATE_12MBPS |
+						   CONF_HW_BIT_RATE_24MBPS);
+	}
+
+	join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
+	join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
 
-	join->beacon_interval = beacon_interval;
-	join->dtim_interval = dtim_interval;
-	join->bss_type = bss_type;
 	join->channel = wl->channel;
 	join->ssid_len = wl->ssid_len;
 	memcpy(join->ssid, wl->ssid, wl->ssid_len);
@@ -228,21 +377,24 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
 
 	join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
 
+	/* reset TX security counters */
+	wl->tx_security_last_seq = 0;
+	wl->tx_security_seq_16 = 0;
+	wl->tx_security_seq_32 = 0;
 
-	ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join));
+	ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
 	if (ret < 0) {
 		wl1271_error("failed to initiate cmd join");
 		goto out_free;
 	}
 
-	timeout = msecs_to_jiffies(JOIN_TIMEOUT);
+	wl->joined = true;
 
 	/*
 	 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
 	 * simplify locking we just sleep instead, for now
 	 */
-	if (wait)
-		msleep(10);
+	msleep(10);
 
 out_free:
 	kfree(join);
@@ -262,34 +414,21 @@ out:
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
 {
 	int ret;
+	size_t res_len = 0;
 
 	wl1271_debug(DEBUG_CMD, "cmd test");
 
-	ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len);
+	if (answer)
+		res_len = buf_len;
+
+	ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len, res_len);
 
 	if (ret < 0) {
 		wl1271_warning("TEST command failed");
 		return ret;
 	}
 
-	if (answer) {
-		struct wl1271_command *cmd_answer;
-
-		/*
-		 * The test command got in, we can read the answer.
-		 * The answer would be a wl1271_command, where the
-		 * parameter array contains the actual answer.
-		 */
-		wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
-
-		cmd_answer = buf;
-
-		if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
-			wl1271_error("TEST command answer error: %d",
-				     cmd_answer->header.status);
-	}
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -307,26 +446,15 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
 
 	wl1271_debug(DEBUG_CMD, "cmd interrogate");
 
-	acx->id = id;
+	acx->id = cpu_to_le16(id);
 
 	/* payload length, does not include any headers */
-	acx->len = len - sizeof(*acx);
+	acx->len = cpu_to_le16(len - sizeof(*acx));
 
-	ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx));
-	if (ret < 0) {
+	ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len);
+	if (ret < 0)
 		wl1271_error("INTERROGATE command failed");
-		goto out;
-	}
 
-	/* the interrogate command got in, we can read the answer */
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, len);
-
-	acx = buf;
-	if (acx->cmd.status != CMD_STATUS_SUCCESS)
-		wl1271_error("INTERROGATE command error: %d",
-			     acx->cmd.status);
-
-out:
 	return ret;
 }
 
@@ -345,12 +473,12 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
 
 	wl1271_debug(DEBUG_CMD, "cmd configure");
 
-	acx->id = id;
+	acx->id = cpu_to_le16(id);
 
 	/* payload length, does not include any headers */
-	acx->len = len - sizeof(*acx);
+	acx->len = cpu_to_le16(len - sizeof(*acx));
 
-	ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len);
+	ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0);
 	if (ret < 0) {
 		wl1271_warning("CONFIGURE command NOK");
 		return ret;
@@ -383,7 +511,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
 		cmd_tx = CMD_DISABLE_TX;
 	}
 
-	ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
 		wl1271_error("rx %s cmd for channel %d failed",
 			     enable ? "start" : "stop", channel);
@@ -393,7 +521,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
 	wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
 		     enable ? "start" : "stop", channel);
 
-	ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
 		wl1271_error("tx %s cmd for channel %d failed",
 			     enable ? "start" : "stop", channel);
@@ -414,8 +542,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
 	int ret = 0;
 
 	/* FIXME: this should be in ps.c */
-	ret = wl1271_acx_wake_up_conditions(wl, WAKE_UP_EVENT_DTIM_BITMAP,
-					    wl->listen_int);
+	ret = wl1271_acx_wake_up_conditions(wl);
 	if (ret < 0) {
 		wl1271_error("couldn't set wake up conditions");
 		goto out;
@@ -433,10 +560,10 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
 	ps_params->send_null_data = 1;
 	ps_params->retries = 5;
 	ps_params->hang_over_period = 128;
-	ps_params->null_data_rate = 1; /* 1 Mbps */
+	ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
 
 	ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
-			      sizeof(*ps_params));
+			      sizeof(*ps_params), 0);
 	if (ret < 0) {
 		wl1271_error("cmd set_ps_mode failed");
 		goto out;
@@ -464,22 +591,17 @@ int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
 	WARN_ON(len > MAX_READ_SIZE);
 	len = min_t(size_t, len, MAX_READ_SIZE);
 
-	cmd->addr = addr;
-	cmd->size = len;
+	cmd->addr = cpu_to_le32(addr);
+	cmd->size = cpu_to_le32(len);
 
-	ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd),
+			      sizeof(*cmd));
 	if (ret < 0) {
 		wl1271_error("read memory command failed: %d", ret);
 		goto out;
 	}
 
-	/* the read command got in, we can now read the answer */
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
-
-	if (cmd->header.status != CMD_STATUS_SUCCESS)
-		wl1271_error("error in read command result: %d",
-			     cmd->header.status);
-
+	/* the read command got in */
 	memcpy(answer, cmd->value, len);
 
 out:
@@ -488,14 +610,31 @@ out:
 }
 
 int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
-		    u8 active_scan, u8 high_prio, u8 num_channels,
+		    u8 active_scan, u8 high_prio, u8 band,
 		    u8 probe_requests)
 {
 
 	struct wl1271_cmd_trigger_scan_to *trigger = NULL;
 	struct wl1271_cmd_scan *params = NULL;
-	int i, ret;
+	struct ieee80211_channel *channels;
+	int i, j, n_ch, ret;
 	u16 scan_options = 0;
+	u8 ieee_band;
+
+	if (band == WL1271_SCAN_BAND_2_4_GHZ)
+		ieee_band = IEEE80211_BAND_2GHZ;
+	else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
+		ieee_band = IEEE80211_BAND_2GHZ;
+	else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
+		ieee_band = IEEE80211_BAND_5GHZ;
+	else
+		return -EINVAL;
+
+	if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
+		return -EINVAL;
+
+	channels = wl->hw->wiphy->bands[ieee_band]->channels;
+	n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
 
 	if (wl->scanning)
 		return -EINVAL;
@@ -512,32 +651,43 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
 		scan_options |= WL1271_SCAN_OPT_PASSIVE;
 	if (high_prio)
 		scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH;
-	params->params.scan_options = scan_options;
+	params->params.scan_options = cpu_to_le16(scan_options);
 
-	params->params.num_channels = num_channels;
 	params->params.num_probe_requests = probe_requests;
-	params->params.tx_rate = cpu_to_le32(RATE_MASK_2MBPS);
+	/* Let the fw autodetect suitable tx_rate for probes */
+	params->params.tx_rate = 0;
 	params->params.tid_trigger = 0;
 	params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
-	for (i = 0; i < num_channels; i++) {
-		params->channels[i].min_duration =
-			cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
-		params->channels[i].max_duration =
-			cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
-		memset(&params->channels[i].bssid_lsb, 0xff, 4);
-		memset(&params->channels[i].bssid_msb, 0xff, 2);
-		params->channels[i].early_termination = 0;
-		params->channels[i].tx_power_att = WL1271_SCAN_CURRENT_TX_PWR;
-		params->channels[i].channel = i + 1;
+	if (band == WL1271_SCAN_BAND_DUAL)
+		params->params.band = WL1271_SCAN_BAND_2_4_GHZ;
+	else
+		params->params.band = band;
+
+	for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) {
+		if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) {
+			params->channels[j].min_duration =
+				cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
+			params->channels[j].max_duration =
+				cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
+			memset(&params->channels[j].bssid_lsb, 0xff, 4);
+			memset(&params->channels[j].bssid_msb, 0xff, 2);
+			params->channels[j].early_termination = 0;
+			params->channels[j].tx_power_att =
+				WL1271_SCAN_CURRENT_TX_PWR;
+			params->channels[j].channel = channels[i].hw_value;
+			j++;
+		}
 	}
 
+	params->params.num_channels = j;
+
 	if (len && ssid) {
 		params->params.ssid_len = len;
 		memcpy(params->params.ssid, ssid, len);
 	}
 
-	ret = wl1271_cmd_build_probe_req(wl, ssid, len);
+	ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
 	if (ret < 0) {
 		wl1271_error("PROBE request template failed");
 		goto out;
@@ -553,7 +703,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
 	trigger->timeout = 0;
 
 	ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
-			      sizeof(*trigger));
+			      sizeof(*trigger), 0);
 	if (ret < 0) {
 		wl1271_error("trigger scan to failed for hw scan");
 		goto out;
@@ -562,20 +712,24 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
 	wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
 
 	wl->scanning = true;
+	if (wl1271_11a_enabled()) {
+		wl->scan.state = band;
+		if (band == WL1271_SCAN_BAND_DUAL) {
+			wl->scan.active = active_scan;
+			wl->scan.high_prio = high_prio;
+			wl->scan.probe_requests = probe_requests;
+			if (len && ssid) {
+				wl->scan.ssid_len = len;
+				memcpy(wl->scan.ssid, ssid, len);
+			} else
+				wl->scan.ssid_len = 0;
+		}
+	}
 
-	ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+	ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
 	if (ret < 0) {
 		wl1271_error("SCAN failed");
-		goto out;
-	}
-
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
-
-	if (params->header.status != CMD_STATUS_SUCCESS) {
-		wl1271_error("Scan command error: %d",
-			     params->header.status);
 		wl->scanning = false;
-		ret = -EIO;
 		goto out;
 	}
 
@@ -603,14 +757,14 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
 
 	cmd->len = cpu_to_le16(buf_len);
 	cmd->template_type = template_id;
-	cmd->enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
-	cmd->short_retry_limit = ACX_RATE_RETRY_LIMIT;
-	cmd->long_retry_limit = ACX_RATE_RETRY_LIMIT;
+	cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
+	cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
+	cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
 
 	if (buf)
 		memcpy(cmd->template_data, buf, buf_len);
 
-	ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
 		wl1271_warning("cmd set_template failed: %d", ret);
 		goto out_free;
@@ -623,30 +777,62 @@ out:
 	return ret;
 }
 
-static int wl1271_build_basic_rates(char *rates)
+static int wl1271_build_basic_rates(char *rates, u8 band)
 {
 	u8 index = 0;
 
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
+	if (band == IEEE80211_BAND_2GHZ) {
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
+	} else if (band == IEEE80211_BAND_5GHZ) {
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
+	} else {
+		wl1271_error("build_basic_rates invalid band: %d", band);
+	}
 
 	return index;
 }
 
-static int wl1271_build_extended_rates(char *rates)
+static int wl1271_build_extended_rates(char *rates, u8 band)
 {
 	u8 index = 0;
 
-	rates[index++] = IEEE80211_OFDM_RATE_6MB;
-	rates[index++] = IEEE80211_OFDM_RATE_9MB;
-	rates[index++] = IEEE80211_OFDM_RATE_12MB;
-	rates[index++] = IEEE80211_OFDM_RATE_18MB;
-	rates[index++] = IEEE80211_OFDM_RATE_24MB;
-	rates[index++] = IEEE80211_OFDM_RATE_36MB;
-	rates[index++] = IEEE80211_OFDM_RATE_48MB;
-	rates[index++] = IEEE80211_OFDM_RATE_54MB;
+	if (band == IEEE80211_BAND_2GHZ) {
+		rates[index++] = IEEE80211_OFDM_RATE_6MB;
+		rates[index++] = IEEE80211_OFDM_RATE_9MB;
+		rates[index++] = IEEE80211_OFDM_RATE_12MB;
+		rates[index++] = IEEE80211_OFDM_RATE_18MB;
+		rates[index++] = IEEE80211_OFDM_RATE_24MB;
+		rates[index++] = IEEE80211_OFDM_RATE_36MB;
+		rates[index++] = IEEE80211_OFDM_RATE_48MB;
+		rates[index++] = IEEE80211_OFDM_RATE_54MB;
+	} else if (band == IEEE80211_BAND_5GHZ) {
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
+	} else {
+		wl1271_error("build_basic_rates invalid band: %d", band);
+	}
 
 	return index;
 }
@@ -665,7 +851,8 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
 
 	memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
 	template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
-						IEEE80211_STYPE_NULLFUNC);
+						IEEE80211_STYPE_NULLFUNC |
+						IEEE80211_FCTL_TODS);
 
 	return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
 				       sizeof(template));
@@ -678,7 +865,10 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
 
 	memcpy(template.bssid, wl->bssid, ETH_ALEN);
 	memcpy(template.ta, wl->mac_addr, ETH_ALEN);
-	template.aid = aid;
+
+	/* aid in PS-Poll has its two MSBs each set to 1 */
+	template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+
 	template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
 
 	return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
@@ -686,12 +876,14 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
 
 }
 
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len)
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
+			       u8 band)
 {
 	struct wl12xx_probe_req_template template;
 	struct wl12xx_ie_rates *rates;
 	char *ptr;
 	u16 size;
+	int ret;
 
 	ptr = (char *)&template;
 	size = sizeof(struct ieee80211_header);
@@ -713,20 +905,25 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len)
 	/* Basic Rates */
 	rates = (struct wl12xx_ie_rates *)ptr;
 	rates->header.id = WLAN_EID_SUPP_RATES;
-	rates->header.len = wl1271_build_basic_rates(rates->rates);
+	rates->header.len = wl1271_build_basic_rates(rates->rates, band);
 	size += sizeof(struct wl12xx_ie_header) + rates->header.len;
 	ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
 
 	/* Extended rates */
 	rates = (struct wl12xx_ie_rates *)ptr;
 	rates->header.id = WLAN_EID_EXT_SUPP_RATES;
-	rates->header.len = wl1271_build_extended_rates(rates->rates);
+	rates->header.len = wl1271_build_extended_rates(rates->rates, band);
 	size += sizeof(struct wl12xx_ie_header) + rates->header.len;
 
 	wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
 
-	return wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
-				       &template, size);
+	if (band == IEEE80211_BAND_2GHZ)
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+					      &template, size);
+	else
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+					      &template, size);
+	return ret;
 }
 
 int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
@@ -743,10 +940,10 @@ int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
 	}
 
 	cmd->id = id;
-	cmd->key_action = KEY_SET_ID;
+	cmd->key_action = cpu_to_le16(KEY_SET_ID);
 	cmd->key_type = KEY_WEP;
 
-	ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
 		wl1271_warning("cmd set_default_wep_key failed: %d", ret);
 		goto out;
@@ -759,7 +956,8 @@ out:
 }
 
 int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-		       u8 key_size, const u8 *key, const u8 *addr)
+		       u8 key_size, const u8 *key, const u8 *addr,
+		       u32 tx_seq_32, u16 tx_seq_16)
 {
 	struct wl1271_cmd_set_keys *cmd;
 	int ret = 0;
@@ -773,16 +971,18 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
 	if (key_type != KEY_WEP)
 		memcpy(cmd->addr, addr, ETH_ALEN);
 
-	cmd->key_action = action;
+	cmd->key_action = cpu_to_le16(action);
 	cmd->key_size = key_size;
 	cmd->key_type = key_type;
 
+	cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+	cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
 	/* we have only one SSID profile */
 	cmd->ssid_profile = 0;
 
 	cmd->id = id;
 
-	/* FIXME: this is from wl1251, needs to be checked */
 	if (key_type == KEY_TKIP) {
 		/*
 		 * We get the key in the following form:
@@ -800,7 +1000,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
 
 	wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd));
 
-	ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
 		wl1271_warning("could not set keys");
 		goto out;
@@ -811,3 +1011,34 @@ out:
 
 	return ret;
 }
+
+int wl1271_cmd_disconnect(struct wl1271 *wl)
+{
+	struct wl1271_cmd_disconnect *cmd;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_CMD, "cmd disconnect");
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cmd->rx_config_options = cpu_to_le32(wl->rx_config);
+	cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
+	/* disconnect reason is not used in immediate disconnections */
+	cmd->type = DISCONNECT_IMMEDIATE;
+
+	ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0);
+	if (ret < 0) {
+		wl1271_error("failed to send disconnect command");
+		goto out_free;
+	}
+
+out_free:
+	kfree(cmd);
+
+out:
+	return ret;
+}
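
A short, hedged usage sketch of the new wl1271_cmd_send() contract introduced
above (mirroring wl1271_cmd_test(), not additional driver code): res_len == 0
reads back only the command header so the status can be checked, while a
non-zero res_len copies that many bytes of the answer into the caller's
DMA-able buffer. The wrapper name is hypothetical:

/* Sketch: choose between "status only" and "full answer" read-back. */
static int wl1271_cmd_send_sketch(struct wl1271 *wl, void *buf, size_t len,
				  bool want_answer)
{
	size_t res_len = want_answer ? len : 0;

	/* on success, the answer (if requested) is already copied into buf */
	return wl1271_cmd_send(wl, CMD_TEST, buf, len, res_len);
}
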
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 951a8447a516..b4fa4acb9229 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -29,9 +29,11 @@
 
 struct acx_header;
 
-int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len);
-int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
-		    u16 beacon_interval, u8 wait);
+int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
+		    size_t res_len);
+int wl1271_cmd_general_parms(struct wl1271 *wl);
+int wl1271_cmd_radio_parms(struct wl1271 *wl);
+int wl1271_cmd_join(struct wl1271 *wl);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -40,16 +42,19 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
 			   size_t len);
 int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
-		    u8 active_scan, u8 high_prio, u8 num_channels,
+		    u8 active_scan, u8 high_prio, u8 band,
 		    u8 probe_requests);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
 			    void *buf, size_t buf_len);
 int wl1271_cmd_build_null_data(struct wl1271 *wl);
 int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
+			       u8 band);
 int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
 int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-		       u8 key_size, const u8 *key, const u8 *addr);
+		       u8 key_size, const u8 *key, const u8 *addr,
+		       u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_disconnect(struct wl1271 *wl);
 
 enum wl1271_commands {
 	CMD_INTERROGATE     = 1,    /*use this to read information elements*/
@@ -118,8 +123,8 @@ enum cmd_templ {
 #define WL1271_CMD_TEMPL_MAX_SIZE  252
 
 struct wl1271_cmd_header {
-	u16 id;
-	u16 status;
+	__le16 id;
+	__le16 status;
 	/* payload */
 	u8 data[0];
 } __attribute__ ((packed));
@@ -172,17 +177,17 @@ struct cmd_read_write_memory {
 	struct wl1271_cmd_header header;
 
 	/* The address of the memory to read from or write to.*/
-	u32 addr;
+	__le32 addr;
 
 	/* The amount of data in bytes to read from or write to the WiLink
 	 * device.*/
-	u32 size;
+	__le32 size;
 
 	/* The actual value read from or written to the Wilink. The source
 	   of this field is the Host in WRITE command or the Wilink in READ
 	   command. */
 	u8 value[MAX_READ_SIZE];
-};
+} __attribute__ ((packed));
 
 #define CMDMBOX_HEADER_LEN 4
 #define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -196,22 +201,23 @@ enum {
 
 #define WL1271_JOIN_CMD_CTRL_TX_FLUSH     0x80 /* Firmware flushes all Tx */
 #define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
+#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
 
 struct wl1271_cmd_join {
 	struct wl1271_cmd_header header;
 
-	u32 bssid_lsb;
-	u16 bssid_msb;
-	u16 beacon_interval; /* in TBTTs */
-	u32 rx_config_options;
-	u32 rx_filter_options;
+	__le32 bssid_lsb;
+	__le16 bssid_msb;
+	__le16 beacon_interval; /* in TBTTs */
+	__le32 rx_config_options;
+	__le32 rx_filter_options;
 
 	/*
 	 * The target uses this field to determine the rate at
 	 * which to transmit control frame responses (such as
 	 * ACK or CTS frames).
 	 */
-	u32 basic_rate_set;
+	__le32 basic_rate_set;
 	u8 dtim_interval;
 	/*
 	 * bits 0-2: This bitwise field specifies the type
@@ -240,10 +246,10 @@ struct cmd_enabledisable_path {
 struct wl1271_cmd_template_set {
 	struct wl1271_cmd_header header;
 
-	u16 len;
+	__le16 len;
 	u8 template_type;
 	u8 index;  /* relevant only for KLV_TEMPLATE type */
-	u32 enabled_rates;
+	__le32 enabled_rates;
 	u8 short_retry_limit;
 	u8 long_retry_limit;
 	u8 aflags;
@@ -280,18 +286,13 @@ struct wl1271_cmd_ps_params {
 	  * to power save mode.
 	  */
 	u8 hang_over_period;
-	u32 null_data_rate;
+	__le32 null_data_rate;
 } __attribute__ ((packed));
 
 /* HW encryption keys */
 #define NUM_ACCESS_CATEGORIES_COPY 4
 #define MAX_KEY_SIZE 32
 
-/* When set, disable HW encryption */
-#define DF_ENCRYPTION_DISABLE      0x01
-/* When set, disable HW decryption */
-#define DF_SNIFF_MODE_ENABLE       0x80
-
 enum wl1271_cmd_key_action {
 	KEY_ADD_OR_REPLACE = 1,
 	KEY_REMOVE         = 2,
@@ -316,9 +317,9 @@ struct wl1271_cmd_set_keys {
 	u8 addr[ETH_ALEN];
 
 	/* key_action_e */
-	u16 key_action;
+	__le16 key_action;
 
-	u16 reserved_1;
+	__le16 reserved_1;
 
 	/* key size in bytes */
 	u8 key_size;
@@ -334,8 +335,8 @@ struct wl1271_cmd_set_keys {
 	u8 id;
 	u8 reserved_2[6];
 	u8 key[MAX_KEY_SIZE];
-	u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
-	u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+	__le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+	__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
 } __attribute__ ((packed));
 
 
@@ -347,19 +348,22 @@ struct wl1271_cmd_set_keys {
 #define WL1271_SCAN_OPT_PRIORITY_HIGH  4
 #define WL1271_SCAN_CHAN_MIN_DURATION  30000  /* TU */
 #define WL1271_SCAN_CHAN_MAX_DURATION  60000  /* TU */
+#define WL1271_SCAN_BAND_2_4_GHZ 0
+#define WL1271_SCAN_BAND_5_GHZ 1
+#define WL1271_SCAN_BAND_DUAL 2
 
 struct basic_scan_params {
-	u32 rx_config_options;
-	u32 rx_filter_options;
+	__le32 rx_config_options;
+	__le32 rx_filter_options;
 	/* Scan option flags (WL1271_SCAN_OPT_*) */
-	u16 scan_options;
+	__le16 scan_options;
 	/* Number of scan channels in the list (maximum 30) */
 	u8 num_channels;
 	/* This field indicates the number of probe requests to send
 	   per channel for an active scan */
 	u8 num_probe_requests;
 	/* Rate bit field for sending the probes */
-	u32 tx_rate;
+	__le32 tx_rate;
 	u8 tid_trigger;
 	u8 ssid_len;
 	/* in order to align */
@@ -374,10 +378,10 @@ struct basic_scan_params {
 
 struct basic_scan_channel_params {
 	/* Duration in TU to wait for frames on a channel for active scan */
-	u32 min_duration;
-	u32 max_duration;
-	u32 bssid_lsb;
-	u16 bssid_msb;
+	__le32 min_duration;
+	__le32 max_duration;
+	__le32 bssid_lsb;
+	__le16 bssid_msb;
 	u8 early_termination;
 	u8 tx_power_att;
 	u8 channel;
@@ -397,13 +401,13 @@ struct wl1271_cmd_scan {
 struct wl1271_cmd_trigger_scan_to {
 	struct wl1271_cmd_header header;
 
-	u32 timeout;
-};
+	__le32 timeout;
+} __attribute__ ((packed));
 
 struct wl1271_cmd_test_header {
 	u8 id;
 	u8 padding[3];
-};
+} __attribute__ ((packed));
 
 enum wl1271_channel_tune_bands {
 	WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -416,6 +420,76 @@ enum wl1271_channel_tune_bands {
 #define TEST_CMD_P2G_CAL                   0x02
 #define TEST_CMD_CHANNEL_TUNE              0x0d
 #define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d
+#define TEST_CMD_INI_FILE_RADIO_PARAM      0x19
+#define TEST_CMD_INI_FILE_GENERAL_PARAM    0x1E
+
+struct wl1271_general_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	u8 ref_clk;
+	u8 settling_time;
+	u8 clk_valid_on_wakeup;
+	u8 dc2dcmode;
+	u8 single_dual_band;
+
+	u8 tx_bip_fem_autodetect;
+	u8 tx_bip_fem_manufacturer;
+	u8 settings;
+} __attribute__ ((packed));
+
+struct wl1271_radio_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	/* Static radio parameters */
+	/* 2.4GHz */
+	u8 rx_trace_loss;
+	u8 tx_trace_loss;
+	s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
+
+	/* 5GHz */
+	u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
+
+	/* Dynamic radio parameters */
+	/* 2.4GHz */
+	__le16 tx_ref_pd_voltage;
+	s8  tx_ref_power;
+	s8  tx_offset_db;
+
+	s8  tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+
+	s8  tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
+
+	u8  tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
+	u8  rx_fem_insertion_loss;
+
+	u8 padding2;
+
+	/* 5GHz */
+	__le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
+
+	s8  tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+
+	s8  tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
+	s8  tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
+
+	/* FIXME: this is inconsistent with the types for 2.4GHz */
+	s8  tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+
+	u8 padding3[2];
+} __attribute__ ((packed));
 
 struct wl1271_cmd_cal_channel_tune {
 	struct wl1271_cmd_header header;
@@ -425,7 +499,7 @@ struct wl1271_cmd_cal_channel_tune {
 	u8 band;
 	u8 channel;
 
-	u16 radio_status;
+	__le16 radio_status;
 } __attribute__ ((packed));
 
 struct wl1271_cmd_cal_update_ref_point {
@@ -433,8 +507,8 @@ struct wl1271_cmd_cal_update_ref_point {
 
 	struct wl1271_cmd_test_header test;
 
-	s32 ref_power;
-	s32 ref_detector;
+	__le32 ref_power;
+	__le32 ref_detector;
 	u8  sub_band;
 	u8  padding[3];
 } __attribute__ ((packed));
@@ -449,16 +523,42 @@ struct wl1271_cmd_cal_p2g {
 
 	struct wl1271_cmd_test_header test;
 
-	u16 len;
+	__le16 len;
 	u8  buf[MAX_TLV_LENGTH];
 	u8  type;
 	u8  padding;
 
-	s16 radio_status;
+	__le16 radio_status;
 	u8  nvs_version[MAX_NVS_VERSION_LENGTH];
 
 	u8  sub_band_mask;
 	u8  padding2;
 } __attribute__ ((packed));
 
+
+/*
+ * There are three types of disconnections:
+ *
+ * DISCONNECT_IMMEDIATE: the fw doesn't send any frames
+ * DISCONNECT_DEAUTH:    the fw generates a DEAUTH request with the reason
+ *                       we have passed
+ * DISCONNECT_DISASSOC:  the fw generates a DISASSOC request with the reason
+ *                       we have passed
+ */
+enum wl1271_disconnect_type {
+	DISCONNECT_IMMEDIATE,
+	DISCONNECT_DEAUTH,
+	DISCONNECT_DISASSOC
+};
+
+struct wl1271_cmd_disconnect {
+	__le32 rx_config_options;
+	__le32 rx_filter_options;
+
+	__le16 reason;
+	u8  type;
+
+	u8  padding;
+} __attribute__ ((packed));
+
 #endif /* __WL1271_CMD_H__ */
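
Since the command header is now declared with __le16 fields, values that cross
to the firmware are converted explicitly. A minimal sketch of the convention,
assuming only the header above and the standard kernel byte-order helpers (the
helper names are hypothetical):

/* Sketch: fill and read back a command header with explicit conversions. */
static inline void wl1271_cmd_hdr_fill_sketch(struct wl1271_cmd_header *hdr,
					      u16 id)
{
	hdr->id = cpu_to_le16(id);	/* stored little-endian on the wire */
	hdr->status = 0;
}

static inline u16 wl1271_cmd_hdr_status_sketch(struct wl1271_cmd_header *hdr)
{
	return le16_to_cpu(hdr->status);	/* back to CPU byte order */
}
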
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
new file mode 100644
index 000000000000..565373ede265
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -0,0 +1,919 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_CONF_H__
+#define __WL1271_CONF_H__
+
+enum {
+	CONF_HW_BIT_RATE_1MBPS   = BIT(0),
+	CONF_HW_BIT_RATE_2MBPS   = BIT(1),
+	CONF_HW_BIT_RATE_5_5MBPS = BIT(2),
+	CONF_HW_BIT_RATE_6MBPS   = BIT(3),
+	CONF_HW_BIT_RATE_9MBPS   = BIT(4),
+	CONF_HW_BIT_RATE_11MBPS  = BIT(5),
+	CONF_HW_BIT_RATE_12MBPS  = BIT(6),
+	CONF_HW_BIT_RATE_18MBPS  = BIT(7),
+	CONF_HW_BIT_RATE_22MBPS  = BIT(8),
+	CONF_HW_BIT_RATE_24MBPS  = BIT(9),
+	CONF_HW_BIT_RATE_36MBPS  = BIT(10),
+	CONF_HW_BIT_RATE_48MBPS  = BIT(11),
+	CONF_HW_BIT_RATE_54MBPS  = BIT(12),
+	CONF_HW_BIT_RATE_MCS_0   = BIT(13),
+	CONF_HW_BIT_RATE_MCS_1   = BIT(14),
+	CONF_HW_BIT_RATE_MCS_2   = BIT(15),
+	CONF_HW_BIT_RATE_MCS_3   = BIT(16),
+	CONF_HW_BIT_RATE_MCS_4   = BIT(17),
+	CONF_HW_BIT_RATE_MCS_5   = BIT(18),
+	CONF_HW_BIT_RATE_MCS_6   = BIT(19),
+	CONF_HW_BIT_RATE_MCS_7   = BIT(20)
+};
+
+enum {
+	CONF_HW_RATE_INDEX_1MBPS   = 0,
+	CONF_HW_RATE_INDEX_2MBPS   = 1,
+	CONF_HW_RATE_INDEX_5_5MBPS = 2,
+	CONF_HW_RATE_INDEX_6MBPS   = 3,
+	CONF_HW_RATE_INDEX_9MBPS   = 4,
+	CONF_HW_RATE_INDEX_11MBPS  = 5,
+	CONF_HW_RATE_INDEX_12MBPS  = 6,
+	CONF_HW_RATE_INDEX_18MBPS  = 7,
+	CONF_HW_RATE_INDEX_22MBPS  = 8,
+	CONF_HW_RATE_INDEX_24MBPS  = 9,
+	CONF_HW_RATE_INDEX_36MBPS  = 10,
+	CONF_HW_RATE_INDEX_48MBPS  = 11,
+	CONF_HW_RATE_INDEX_54MBPS  = 12,
+	CONF_HW_RATE_INDEX_MAX     = CONF_HW_RATE_INDEX_54MBPS,
+};
+
+struct conf_sg_settings {
+	/*
+	 * Defines the PER threshold, in PPM, of the BT voice; reaching
+	 * this value triggers raising the priority of the BT voice by
+	 * the BT IP until the next NFS sample interval time, as defined
+	 * in nfs_sample_interval.
+	 *
+	 * Unit: PER value in PPM (parts per million)
+	 * #Error_packets / #Total_packets
+
+	 * Range: u32
+	 */
+	u32 per_threshold;
+
+	/*
+	 * This value is an absolute time in micro-seconds to limit the
+	 * maximum scan duration compensation while in SG
+	 */
+	u32 max_scan_compensation_time;
+
+	/* Defines the PER threshold of the BT voice; reaching this value
+	 * triggers raising the priority of the BT voice until the next
+	 * NFS sample interval time, as defined in sample_interval.
+	 *
+	 * Unit: msec
+	 * Range: 1-65000
+	 */
+	u16 nfs_sample_interval;
+
+	/*
+	 * Defines the load ratio for the BT.
+	 * The WLAN ratio is: 100 - load_ratio
+	 *
+	 * Unit: Percent
+	 * Range: 0-100
+	 */
+	u8 load_ratio;
+
+	/*
+	 * true - Co-ex is allowed to enter/exit P.S automatically and
+	 *        transparently to the host
+	 *
+	 * false - Co-ex is disallowed to enter/exit P.S and will trigger an
+	 *         event to the host to notify for the need to enter/exit P.S
+	 *         due to BT change state
+	 *
+	 */
+	u8 auto_ps_mode;
+
+	/*
+	 * This parameter defines the compensation percentage of num of probe
+	 * requests in case scan is initiated during BT voice/BT ACL
+	 * guaranteed link.
+	 *
+	 * Unit: Percent
+	 * Range: 0-255 (0 - No compensation)
+	 */
+	u8 probe_req_compensation;
+
+	/*
+	 * This parameter defines the compensation percentage of scan window
+	 * size in case scan is initiated during BT voice/BT ACL Guaranteed
+	 * link.
+	 *
+	 * Unit: Percent
+	 * Range: 0-255 (0 - No compensation)
+	 */
+	u8 scan_window_compensation;
+
+	/*
+	 * Defines the antenna configuration.
+	 *
+	 * Range: 0 - Single Antenna; 1 - Dual Antenna
+	 */
+	u8 antenna_config;
+
+	/*
+	 * The percent out of the Max consecutive beacon miss roaming trigger
+	 * which is the threshold for raising the priority of beacon
+	 * reception.
+	 *
+	 * Range: 1-100
+	 * N = MaxConsecutiveBeaconMiss
+	 * P = coexMaxConsecutiveBeaconMissPrecent
+	 * Threshold = MIN( N-1, round(N * P / 100))
+	 */
+	u8 beacon_miss_threshold;
+
+	/*
+	 * The RX rate threshold below which rate adaptation is assumed to be
+	 * occurring at the AP which will raise priority for ACTIVE_RX and RX
+	 * SP.
+	 *
+	 * Range: HW_BIT_RATE_*
+	 */
+	u32 rate_adaptation_threshold;
+
+	/*
+	 * The SNR above which the RX rate threshold indicating AP rate
+	 * adaptation is valid
+	 *
+	 * Range: -128 - 127
+	 */
+	s8 rate_adaptation_snr;
+};
+
+enum conf_rx_queue_type {
+	CONF_RX_QUEUE_TYPE_LOW_PRIORITY,  /* All except the high priority */
+	CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */
+};
+
+struct conf_rx_settings {
+	/*
+	 * The maximum amount of time, in TU, before the
+	 * firmware discards the MSDU.
+	 *
+	 * Range: 0 - 0xFFFFFFFF
+	 */
+	u32 rx_msdu_life_time;
+
+	/*
+	 * Packet detection threshold in the PHY.
+	 *
+	 * FIXME: details unknown.
+	 */
+	u32 packet_detection_threshold;
+
+	/*
+	 * The longest time the STA will wait to receive traffic from the AP
+	 * after a PS-poll has been transmitted.
+	 *
+	 * Range: 0 - 200000
+	 */
+	u16 ps_poll_timeout;
+	/*
+	 * The longest time the STA will wait to receive traffic from the AP
+	 * after a frame has been sent from an UPSD enabled queue.
+	 *
+	 * Range: 0 - 200000
+	 */
+	u16 upsd_timeout;
+
+	/*
+	 * The number of octets in an MPDU, below which an RTS/CTS
+	 * handshake is not performed.
+	 *
+	 * Range: 0 - 4096
+	 */
+	u16 rts_threshold;
+
+	/*
+	 * The RX Clear Channel Assessment threshold in the PHY
+	 * (the energy threshold).
+	 *
+	 * Range: ENABLE_ENERGY_D  == 0x140A
+	 *        DISABLE_ENERGY_D == 0xFFEF
+	 */
+	u16 rx_cca_threshold;
+
+	/*
+	 * Occupied Rx mem-blocks number which requires interrupting the host
+	 * (0 = no buffering, 0xffff = disabled).
+	 *
+	 * Range: u16
+	 */
+	u16 irq_blk_threshold;
+
+	/*
+	 * Rx packets number which requires interrupting the host
+	 * (0 = no buffering).
+	 *
+	 * Range: u16
+	 */
+	u16 irq_pkt_threshold;
+
+	/*
+	 * Max time in msec the FW may delay RX-Complete interrupt.
+	 *
+	 * Range: 1 - 100
+	 */
+	u16 irq_timeout;
+
+	/*
+	 * The RX queue type.
+	 *
+	 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
+	 */
+	u8 queue_type;
+};
+
+#define CONF_TX_MAX_RATE_CLASSES       8
+
+#define CONF_TX_RATE_MASK_UNSPECIFIED  0
+#define CONF_TX_RATE_MASK_ALL          0x1eff
+#define CONF_TX_RATE_RETRY_LIMIT       10
+
+struct conf_tx_rate_class {
+
+	/*
+	 * The rates enabled for this rate class.
+	 *
+	 * Range: CONF_HW_BIT_RATE_* bit mask
+	 */
+	u32 enabled_rates;
+
+	/*
+	 * The dot11 short retry limit used for TX retries.
+	 *
+	 * Range: u8
+	 */
+	u8 short_retry_limit;
+
+	/*
+	 * The dot11 long retry limit used for TX retries.
+	 *
+	 * Range: u8
+	 */
+	u8 long_retry_limit;
+
+	/*
+	 * Flags controlling the attributes of TX transmission.
+	 *
+	 * Range: bit 0: Truncate - when set, the FW stops sending the frame
+	 *               once the total valid per-rate attempts have been
+	 *               exhausted; otherwise transmission continues at the
+	 *               lowest available rate until the appropriate one of
+	 *               short_retry_limit, long_retry_limit,
+	 *               dot11_max_transmit_msdu_life_time or
+	 *               max_tx_life_time is exhausted.
+	 *            1: Preamble Override - indicates if the preamble type
+	 *               should be used in TX.
+	 *            2: Preamble Type - the type of the preamble to be used by
+	 *               the policy (0 - long preamble, 1 - short preamble).
+	 */
+	u8 aflags;
+};
+
+#define CONF_TX_MAX_AC_COUNT 4
+
+/* Slot number setting to start transmission at PIFS interval */
+#define CONF_TX_AIFS_PIFS 1
+/* Slot number setting to start transmission at DIFS interval normal
+ * DCF access */
+#define CONF_TX_AIFS_DIFS 2
+
+
+enum conf_tx_ac {
+	CONF_TX_AC_BE = 0,         /* best effort / legacy */
+	CONF_TX_AC_BK = 1,         /* background */
+	CONF_TX_AC_VI = 2,         /* video */
+	CONF_TX_AC_VO = 3,         /* voice */
+	CONF_TX_AC_CTS2SELF = 4,   /* fictitious AC, follows AC_VO */
+	CONF_TX_AC_ANY_TID = 0x1f
+};
+
+struct conf_tx_ac_category {
+	/*
+	 * The AC class identifier.
+	 *
+	 * Range: enum conf_tx_ac
+	 */
+	u8 ac;
+
+	/*
+	 * The contention window minimum size (in slots) for the access
+	 * class.
+	 *
+	 * Range: u8
+	 */
+	u8 cw_min;
+
+	/*
+	 * The contention window maximum size (in slots) for the access
+	 * class.
+	 *
+	 * Range: u8
+	 */
+	u16 cw_max;
+
+	/*
+	 * The AIF value (in slots) for the access class.
+	 *
+	 * Range: u8
+	 */
+	u8 aifsn;
+
+	/*
+	 * The TX Op Limit (in microseconds) for the access class.
+	 *
+	 * Range: u16
+	 */
+	u16 tx_op_limit;
+};
+
+#define CONF_TX_MAX_TID_COUNT 7
+
+enum {
+	CONF_CHANNEL_TYPE_DCF = 0,   /* DC/LEGACY*/
+	CONF_CHANNEL_TYPE_EDCF = 1,  /* EDCA*/
+	CONF_CHANNEL_TYPE_HCCA = 2,  /* HCCA*/
+};
+
+enum {
+	CONF_PS_SCHEME_LEGACY = 0,
+	CONF_PS_SCHEME_UPSD_TRIGGER = 1,
+	CONF_PS_SCHEME_LEGACY_PSPOLL = 2,
+	CONF_PS_SCHEME_SAPSD = 3,
+};
+
+enum {
+	CONF_ACK_POLICY_LEGACY = 0,
+	CONF_ACK_POLICY_NO_ACK = 1,
+	CONF_ACK_POLICY_BLOCK = 2,
+};
+
+
+struct conf_tx_tid {
+	u8 queue_id;
+	u8 channel_type;
+	u8 tsid;
+	u8 ps_scheme;
+	u8 ack_policy;
+	u32 apsd_conf[2];
+};
+
+struct conf_tx_settings {
+	/*
+	 * The TX ED value for TELEC Enable/Disable.
+	 *
+	 * Range: 0, 1
+	 */
+	u8 tx_energy_detection;
+
+	/*
+	 * Configuration for rate classes for TX (currently only one
+	 * rate class supported.)
+	 */
+	struct conf_tx_rate_class rc_conf;
+
+	/*
+	 * Configuration for access categories for TX rate control.
+	 */
+	u8 ac_conf_count;
+	struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
+
+	/*
+	 * Configuration for TID parameters.
+	 */
+	u8 tid_conf_count;
+	struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT];
+
+	/*
+	 * The TX fragmentation threshold.
+	 *
+	 * Range: u16
+	 */
+	u16 frag_threshold;
+
+	/*
+	 * Max time in msec the FW may delay frame TX-Complete interrupt.
+	 *
+	 * Range: u16
+	 */
+	u16 tx_compl_timeout;
+
+	/*
+	 * Completed TX packet count which requires to issue the TX-Complete
+	 * interrupt.
+	 *
+	 * Range: u16
+	 */
+	u16 tx_compl_threshold;
+
+};
+
+enum {
+	CONF_WAKE_UP_EVENT_BEACON    = 0x01, /* Wake on every Beacon*/
+	CONF_WAKE_UP_EVENT_DTIM      = 0x02, /* Wake on every DTIM*/
+	CONF_WAKE_UP_EVENT_N_DTIM    = 0x04, /* Wake every Nth DTIM */
+	CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */
+	CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F
+};
+
+#define CONF_MAX_BCN_FILT_IE_COUNT 32
+
+#define CONF_BCN_RULE_PASS_ON_CHANGE         BIT(0)
+#define CONF_BCN_RULE_PASS_ON_APPEARANCE     BIT(1)
+
+#define CONF_BCN_IE_OUI_LEN    3
+#define CONF_BCN_IE_VER_LEN    2
+
+struct conf_bcn_filt_rule {
+	/*
+	 * IE number to which to associate a rule.
+	 *
+	 * Range: u8
+	 */
+	u8 ie;
+
+	/*
+	 * Rule to associate with the specific ie.
+	 *
+	 * Range: CONF_BCN_RULE_PASS_ON_*
+	 */
+	u8 rule;
+
+	/*
+	 * OUI for the vendor specific IE (221)
+	 */
+	u8 oui[CONF_BCN_IE_OUI_LEN];
+
+	/*
+	 * Type for the vendor specific IE (221)
+	 */
+	u8 type;
+
+	/*
+	 * Version for the vendor specific IE (221)
+	 */
+	u8 version[CONF_BCN_IE_VER_LEN];
+};
+
+#define CONF_MAX_RSSI_SNR_TRIGGERS 8
+
+enum {
+	CONF_TRIG_METRIC_RSSI_BEACON = 0,
+	CONF_TRIG_METRIC_RSSI_DATA,
+	CONF_TRIG_METRIC_SNR_BEACON,
+	CONF_TRIG_METRIC_SNR_DATA
+};
+
+enum {
+	CONF_TRIG_EVENT_TYPE_LEVEL = 0,
+	CONF_TRIG_EVENT_TYPE_EDGE
+};
+
+enum {
+	CONF_TRIG_EVENT_DIR_LOW = 0,
+	CONF_TRIG_EVENT_DIR_HIGH,
+	CONF_TRIG_EVENT_DIR_BIDIR
+};
+
+
+struct conf_sig_trigger {
+	/*
+	 * The RSSI / SNR threshold value.
+	 *
+	 * FIXME: what is the range?
+	 */
+	s16 threshold;
+
+	/*
+	 * Minimum delay between two trigger events for this trigger in ms.
+	 *
+	 * Range: 0 - 60000
+	 */
+	u16 pacing;
+
+	/*
+	 * The measurement data source for this trigger.
+	 *
+	 * Range: CONF_TRIG_METRIC_*
+	 */
+	u8 metric;
+
+	/*
+	 * The trigger type of this trigger.
+	 *
+	 * Range: CONF_TRIG_EVENT_TYPE_*
+	 */
+	u8 type;
+
+	/*
+	 * The direction of the trigger.
+	 *
+	 * Range: CONF_TRIG_EVENT_DIR_*
+	 */
+	u8 direction;
+
+	/*
+	 * Hysteresis range of the trigger around the threshold (in dB)
+	 *
+	 * Range: u8
+	 */
+	u8 hysteresis;
+
+	/*
+	 * Index of the trigger rule.
+	 *
+	 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
+	 */
+	u8 index;
+
+	/*
+	 * Enable / disable this rule (to use for clearing rules.)
+	 *
+	 * Range: 1 - Enabled, 2 - Not enabled
+	 */
+	u8 enable;
+};
+
+struct conf_sig_weights {
+
+	/*
+	 * RSSI from beacons average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 rssi_bcn_avg_weight;
+
+	/*
+	 * RSSI from data average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 rssi_pkt_avg_weight;
+
+	/*
+	 * SNR from beacons average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 snr_bcn_avg_weight;
+
+	/*
+	 * SNR from data average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 snr_pkt_avg_weight;
+};
+
+enum conf_bcn_filt_mode {
+	CONF_BCN_FILT_MODE_DISABLED = 0,
+	CONF_BCN_FILT_MODE_ENABLED = 1
+};
+
+enum conf_bet_mode {
+	CONF_BET_MODE_DISABLE = 0,
+	CONF_BET_MODE_ENABLE = 1,
+};
+
+struct conf_conn_settings {
+	/*
+	 * Firmware wakeup conditions configuration. The host may set only
+	 * one bit.
+	 *
+	 * Range: CONF_WAKE_UP_EVENT_*
+	 */
+	u8 wake_up_event;
+
+	/*
+	 * Listen interval for beacons or Dtims.
+	 *
+	 * Range: 0 for beacon and Dtim wakeup
+	 *        1-10 for x Dtims
+	 *        1-255 for x beacons
+	 */
+	u8 listen_interval;
+
+	/*
+	 * Enable or disable the beacon filtering.
+	 *
+	 * Range: CONF_BCN_FILT_MODE_*
+	 */
+	enum conf_bcn_filt_mode bcn_filt_mode;
+
+	/*
+	 * Configure Beacon filter pass-thru rules.
+	 */
+	u8 bcn_filt_ie_count;
+	struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
+
+	/*
+	 * The number of consecutive beacons to lose before the firmware
+	 * becomes out of synch.
+	 *
+	 * Range: u32
+	 */
+	u32 synch_fail_thold;
+
+	/*
+	 * After out-of-synch, the number of TU's to wait without a further
+	 * received beacon (or probe response) before issuing the BSS_EVENT_LOSE
+	 * event.
+	 *
+	 * Range: u32
+	 */
+	u32 bss_lose_timeout;
+
+	/*
+	 * Beacon receive timeout.
+	 *
+	 * Range: u32
+	 */
+	u32 beacon_rx_timeout;
+
+	/*
+	 * Broadcast receive timeout.
+	 *
+	 * Range: u32
+	 */
+	u32 broadcast_timeout;
+
+	/*
+	 * Enable/disable reception of broadcast packets in power save mode
+	 *
+	 * Range: 1 - enable, 0 - disable
+	 */
+	u8 rx_broadcast_in_ps;
+
+	/*
+	 * Consecutive PS Poll failures before sending event to driver
+	 *
+	 * Range: u8
+	 */
+	u8 ps_poll_threshold;
+
+	/*
+	 * Configuration of signal (rssi/snr) triggers.
+	 */
+	u8 sig_trigger_count;
+	struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
+
+	/*
+	 * Configuration of signal average weights.
+	 */
+	struct conf_sig_weights sig_weights;
+
+	/*
+	 * Specifies if beacon early termination procedure is enabled or
+	 * disabled.
+	 *
+	 * Range: CONF_BET_MODE_*
+	 */
+	u8 bet_enable;
+
+	/*
+	 * Specifies the maximum number of consecutive beacons that may be
+	 * early terminated. After this number is reached, at least one full
+	 * beacon must be correctly received in FW before beacon ET
+	 * resumes.
+	 *
+	 * Range 0 - 255
+	 */
+	u8 bet_max_consecutive;
+
+	/*
+	 * Specifies the maximum number of times to try PSM entry if it fails
+	 * (if sending the appropriate null-func message fails.)
+	 *
+	 * Range 0 - 255
+	 */
+	u8 psm_entry_retries;
+};
+
+#define CONF_SR_ERR_TBL_MAX_VALUES   14
+
+struct conf_mart_reflex_err_table {
+	/*
+	 * Number of valid entries in the values[] array below.
+	 *
+	 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
+	 */
+	u8 len;
+
+	/*
+	 * Smart Reflex error table upper limit.
+	 *
+	 * Range: s8
+	 */
+	s8 upper_limit;
+
+	/*
+	 * Smart Reflex error table values.
+	 *
+	 * Range: s8
+	 */
+	s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
+};
+
+enum {
+	CONF_REF_CLK_19_2_E,
+	CONF_REF_CLK_26_E,
+	CONF_REF_CLK_38_4_E,
+	CONF_REF_CLK_52_E
+};
+
+enum single_dual_band_enum {
+	CONF_SINGLE_BAND,
+	CONF_DUAL_BAND
+};
+
+struct conf_general_parms {
+	/*
+	 * RF Reference Clock type / speed
+	 *
+	 * Range: CONF_REF_CLK_*
+	 */
+	u8 ref_clk;
+
+	/*
+	 * Settling time of the reference clock after boot.
+	 *
+	 * Range: u8
+	 */
+	u8 settling_time;
+
+	/*
+	 * Flag defining whether clock is valid on wakeup.
+	 *
+	 * Range: 0 - not valid on wakeup, 1 - valid on wakeup
+	 */
+	u8 clk_valid_on_wakeup;
+
+	/*
+	 * DC-to-DC mode.
+	 *
+	 * Range: Unknown
+	 */
+	u8 dc2dcmode;
+
+	/*
+	 * Flag defining whether used as single or dual-band.
+	 *
+	 * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
+	 */
+	u8 single_dual_band;
+
+	/*
+	 * TX bip fem autodetect flag.
+	 *
+	 * Range: Unknown
+	 */
+	u8 tx_bip_fem_autodetect;
+
+	/*
+	 * TX bip fem manufacturer.
+	 *
+	 * Range: Unknown
+	 */
+	u8 tx_bip_fem_manufacturer;
+
+	/*
+	 * Settings flags.
+	 *
+	 * Range: Unknown
+	 */
+	u8 settings;
+};
+
+#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
+#define CONF_NUMBER_OF_SUB_BANDS_5  7
+#define CONF_NUMBER_OF_RATE_GROUPS  6
+#define CONF_NUMBER_OF_CHANNELS_2_4 14
+#define CONF_NUMBER_OF_CHANNELS_5   35
+
+struct conf_radio_parms {
+	/*
+	 * Static radio parameters for 2.4GHz
+	 *
+	 * Range: unknown
+	 */
+	u8 rx_trace_loss;
+	u8 tx_trace_loss;
+	s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
+
+	/*
+	 * Static radio parameters for 5GHz
+	 *
+	 * Range: unknown
+	 */
+	u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
+
+	/*
+	 * Dynamic radio parameters for 2.4GHz
+	 *
+	 * Range: unknown
+	 */
+	s16 tx_ref_pd_voltage;
+	s8  tx_ref_power;
+	s8  tx_offset_db;
+
+	s8  tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+
+	s8  tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
+
+	u8  tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
+	u8  rx_fem_insertion_loss;
+
+	/*
+	 * Dynamic radio parameters for 5GHz
+	 *
+	 * Range: unknown
+	 */
+	s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
+
+	s8  tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+
+	s8  tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
+	s8  tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
+
+	/* FIXME: this is inconsistent with the types for 2.4GHz */
+	s8  tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+};
+
+#define CONF_SR_ERR_TBL_COUNT        3
+
+struct conf_init_settings {
+	/*
+	 * Configure Smart Reflex error table values.
+	 */
+	struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
+
+	/*
+	 * Smart Reflex enable flag.
+	 *
+	 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
+	 */
+	u8 sr_enable;
+
+	/*
+	 * Configure general parameters.
+	 */
+	struct conf_general_parms genparam;
+
+	/*
+	 * Configure radio parameters.
+	 */
+	struct conf_radio_parms radioparam;
+
+};
+
+struct conf_drv_settings {
+	struct conf_sg_settings sg;
+	struct conf_rx_settings rx;
+	struct conf_tx_settings tx;
+	struct conf_conn_settings conn;
+	struct conf_init_settings init;
+};
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index f3afd4a6ff33..d13fdd99c85c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -26,23 +26,86 @@
 #include "wl1271_spi.h"
 #include "wl1271_event.h"
 #include "wl1271_ps.h"
+#include "wl12xx_80211.h"
 
 static int wl1271_event_scan_complete(struct wl1271 *wl,
 				      struct event_mailbox *mbox)
 {
+	int size = sizeof(struct wl12xx_probe_req_template);
 	wl1271_debug(DEBUG_EVENT, "status: 0x%x",
 		     mbox->scheduled_scan_status);
 
 	if (wl->scanning) {
-		mutex_unlock(&wl->mutex);
-		ieee80211_scan_completed(wl->hw, false);
-		mutex_lock(&wl->mutex);
-		wl->scanning = false;
+		if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
+			wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+						NULL, size);
+			/* 2.4 GHz band scanned, scan 5 GHz band, and
+			 * pretend to the wl1271_cmd_scan function that we
+			 * are not scanning, because it checks for that.
+			 */
+			wl->scanning = false;
+			wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
+						wl->scan.active,
+						wl->scan.high_prio,
+						WL1271_SCAN_BAND_5_GHZ,
+						wl->scan.probe_requests);
+		} else {
+			if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
+				wl1271_cmd_template_set(wl,
+						CMD_TEMPL_CFG_PROBE_REQ_2_4,
+						NULL, size);
+			else
+				wl1271_cmd_template_set(wl,
+						CMD_TEMPL_CFG_PROBE_REQ_5,
+						NULL, size);
+
+			mutex_unlock(&wl->mutex);
+			ieee80211_scan_completed(wl->hw, false);
+			mutex_lock(&wl->mutex);
+			wl->scanning = false;
+		}
 	}
-
 	return 0;
 }
 
+static int wl1271_event_ps_report(struct wl1271 *wl,
+				  struct event_mailbox *mbox,
+				  bool *beacon_loss)
+{
+	int ret = 0;
+
+	wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status);
+
+	switch (mbox->ps_status) {
+	case EVENT_ENTER_POWER_SAVE_FAIL:
+		if (!wl->psm) {
+			wl->psm_entry_retry = 0;
+			break;
+		}
+
+		if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) {
+			wl->psm_entry_retry++;
+			ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+		} else {
+			wl1271_error("PSM entry failed, giving up.\n");
+			wl->psm_entry_retry = 0;
+			*beacon_loss = true;
+		}
+		break;
+	case EVENT_ENTER_POWER_SAVE_SUCCESS:
+		wl->psm_entry_retry = 0;
+		break;
+	case EVENT_EXIT_POWER_SAVE_FAIL:
+		wl1271_info("PSM exit failed");
+		break;
+	case EVENT_EXIT_POWER_SAVE_SUCCESS:
+	default:
+		break;
+	}
+
+	return ret;
+}
+
 static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
 {
 	wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -54,10 +117,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
 {
 	int ret;
 	u32 vector;
+	bool beacon_loss = false;
 
 	wl1271_event_mbox_dump(mbox);
 
-	vector = mbox->events_vector & ~(mbox->events_mask);
+	vector = le32_to_cpu(mbox->events_vector);
+	vector &= ~(le32_to_cpu(mbox->events_mask));
 	wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
 
 	if (vector & SCAN_COMPLETE_EVENT_ID) {
@@ -66,14 +131,34 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
 			return ret;
 	}
 
-	if (vector & BSS_LOSE_EVENT_ID) {
+	/*
+	 * The BSS_LOSE_EVENT_ID is only needed while PSM (and hence beacon
+	 * filtering) is enabled. Without PSM, the stack will receive all
+	 * beacons and can detect beacon loss by itself.
+	 */
+	if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
 		wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
 
-		if (wl->psm_requested && wl->psm) {
-			ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
-			if (ret < 0)
-				return ret;
-		}
+		/* indicate to the stack that beacons have been lost */
+		beacon_loss = true;
+	}
+
+	if (vector & PS_REPORT_EVENT_ID) {
+		wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
+		ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (beacon_loss) {
+		/* Obviously, it's dangerous to release the mutex while
+		   we are holding many of the variables in the wl struct.
+		   That's why it's done last in the function, and care must
+		   be taken that nothing more is done after this function
+		   returns. */
+		mutex_unlock(&wl->mutex);
+		ieee80211_beacon_loss(wl->vif);
+		mutex_lock(&wl->mutex);
 	}
 
 	return 0;
@@ -92,14 +177,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
 
 void wl1271_event_mbox_config(struct wl1271 *wl)
 {
-	wl->mbox_ptr[0] = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
+	wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
 	wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
 
 	wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
 		     wl->mbox_ptr[0], wl->mbox_ptr[1]);
 }
 
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
 {
 	struct event_mailbox mbox;
 	int ret;
@@ -110,8 +195,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
 		return -EINVAL;
 
 	/* first we read the mbox descriptor */
-	wl1271_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox,
-			    sizeof(struct event_mailbox));
+	wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
+			sizeof(struct event_mailbox), false);
 
 	/* process the descriptor */
 	ret = wl1271_event_process(wl, &mbox);
@@ -119,7 +204,9 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
 		return ret;
 
 	/* then we let the firmware know it can go on...*/
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
+	if (do_ack)
+		wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
+				   INTR_TRIG_EVENT_ACK);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 2cdce7c34bf0..4e3f55ebb1a8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -63,36 +63,43 @@ enum {
 	EVENT_MBOX_ALL_EVENT_ID			 = 0x7fffffff,
 };
 
+enum {
+	EVENT_ENTER_POWER_SAVE_FAIL = 0,
+	EVENT_ENTER_POWER_SAVE_SUCCESS,
+	EVENT_EXIT_POWER_SAVE_FAIL,
+	EVENT_EXIT_POWER_SAVE_SUCCESS,
+};
+
 struct event_debug_report {
 	u8 debug_event_id;
 	u8 num_params;
-	u16 pad;
-	u32 report_1;
-	u32 report_2;
-	u32 report_3;
+	__le16 pad;
+	__le32 report_1;
+	__le32 report_2;
+	__le32 report_3;
 } __attribute__ ((packed));
 
 #define NUM_OF_RSSI_SNR_TRIGGERS 8
 
 struct event_mailbox {
-	u32 events_vector;
-	u32 events_mask;
-	u32 reserved_1;
-	u32 reserved_2;
+	__le32 events_vector;
+	__le32 events_mask;
+	__le32 reserved_1;
+	__le32 reserved_2;
 
 	u8 dbg_event_id;
 	u8 num_relevant_params;
-	u16 reserved_3;
-	u32 event_report_p1;
-	u32 event_report_p2;
-	u32 event_report_p3;
+	__le16 reserved_3;
+	__le32 event_report_p1;
+	__le32 event_report_p2;
+	__le32 event_report_p3;
 
 	u8 number_of_scan_results;
 	u8 scan_tag;
 	u8 reserved_4[2];
-	u32 compl_scheduled_scan_status;
+	__le32 compl_scheduled_scan_status;
 
-	u16 scheduled_scan_attended_channels;
+	__le16 scheduled_scan_attended_channels;
 	u8 soft_gemini_sense_info;
 	u8 soft_gemini_protective_info;
 	s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
@@ -105,6 +112,6 @@ struct event_mailbox {
 
 int wl1271_event_unmask(struct wl1271 *wl);
 void wl1271_event_mbox_config(struct wl1271 *wl);
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
 
 #endif
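The __le16/__le32 annotations above pair with the le32_to_cpu() conversions added to wl1271_event.c: the firmware mailbox is little-endian, so converting at the point of use keeps the driver correct on big-endian hosts and lets sparse flag missed conversions. A minimal sketch of the access pattern (hypothetical helper name; struct as defined above):

static u32 wl1271_pending_events(const struct event_mailbox *mbox)
{
	/* convert the little-endian firmware fields on access */
	return le32_to_cpu(mbox->events_vector) &
	       ~le32_to_cpu(mbox->events_mask);
}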
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 490df217605a..11249b436cf1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -59,6 +59,14 @@ static int wl1271_init_templates_config(struct wl1271 *wl)
 	if (ret < 0)
 		return ret;
 
+	if (wl1271_11a_enabled()) {
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+				NULL,
+				sizeof(struct wl12xx_probe_req_template));
+		if (ret < 0)
+			return ret;
+	}
+
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
 				      sizeof(struct wl12xx_null_data_template));
 	if (ret < 0)
@@ -94,7 +102,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
 {
 	int ret;
 
-	ret = wl1271_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF);
+	ret = wl1271_acx_rx_msdu_life_time(wl);
 	if (ret < 0)
 		return ret;
 
@@ -117,7 +125,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_group_address_tbl(wl);
+	ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
 	if (ret < 0)
 		return ret;
 
@@ -125,7 +133,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_rts_threshold(wl, RTS_THRESHOLD_DEF);
+	ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold);
 	if (ret < 0)
 		return ret;
 
@@ -136,7 +144,8 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
 {
 	int ret;
 
-	ret = wl1271_acx_beacon_filter_opt(wl);
+	/* disable beacon filtering at this stage */
+	ret = wl1271_acx_beacon_filter_opt(wl, false);
 	if (ret < 0)
 		return ret;
 
@@ -184,118 +193,15 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
 	return 0;
 }
 
-static int wl1271_init_general_parms(struct wl1271 *wl)
-{
-	struct wl1271_general_parms *gen_parms;
-	int ret;
-
-	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
-	if (!gen_parms)
-		return -ENOMEM;
-
-	gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM;
-
-	gen_parms->ref_clk = REF_CLK_38_4_E;
-	/* FIXME: magic numbers */
-	gen_parms->settling_time = 5;
-	gen_parms->clk_valid_on_wakeup = 0;
-	gen_parms->dc2dcmode = 0;
-	gen_parms->single_dual_band = 0;
-	gen_parms->tx_bip_fem_autodetect = 1;
-	gen_parms->tx_bip_fem_manufacturer = 1;
-	gen_parms->settings = 1;
-
-	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
-	if (ret < 0) {
-		wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
-		return ret;
-	}
-
-	kfree(gen_parms);
-	return 0;
-}
-
-static int wl1271_init_radio_parms(struct wl1271 *wl)
-{
-	/*
-	 * FIXME: All these magic numbers should be moved to some place where
-	 * they can be configured (separate file?)
-	 */
-
-	struct wl1271_radio_parms *radio_parms;
-	int ret;
-	u8 compensation[] = { 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 0xfc, 0x00,
-			      0x08, 0x10, 0xf0, 0xf8, 0x00, 0x0a, 0x14 };
-
-	u8 tx_rate_limits_normal[]   = { 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 };
-	u8 tx_rate_limits_degraded[] = { 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 };
-
-	u8 tx_channel_limits_11b[] = { 0x22, 0x50, 0x50, 0x50,
-				       0x50, 0x50, 0x50, 0x50,
-				       0x50, 0x50, 0x22, 0x50,
-				       0x22, 0x50 };
-
-	u8 tx_channel_limits_ofdm[] = { 0x20, 0x50, 0x50, 0x50,
-					0x50, 0x50, 0x50, 0x50,
-					0x50, 0x50, 0x20, 0x50,
-					0x20, 0x50 };
-
-	u8 tx_pdv_rate_offsets[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-	u8 tx_ibias[] = { 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 };
-
-	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
-	if (!radio_parms)
-		return -ENOMEM;
-
-	radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM;
-
-	/* Static radio parameters */
-	radio_parms->rx_trace_loss = 10;
-	radio_parms->tx_trace_loss = 10;
-	memcpy(radio_parms->rx_rssi_and_proc_compens, compensation,
-	       sizeof(compensation));
-
-	/* We don't set the 5GHz -- N/A */
-
-	/* Dynamic radio parameters */
-	radio_parms->tx_ref_pd_voltage = cpu_to_le16(0x24e);
-	radio_parms->tx_ref_power = 0x78;
-	radio_parms->tx_offset_db = 0x0;
-
-	memcpy(radio_parms->tx_rate_limits_normal, tx_rate_limits_normal,
-	       sizeof(tx_rate_limits_normal));
-	memcpy(radio_parms->tx_rate_limits_degraded, tx_rate_limits_degraded,
-	       sizeof(tx_rate_limits_degraded));
-
-	memcpy(radio_parms->tx_channel_limits_11b, tx_channel_limits_11b,
-	       sizeof(tx_channel_limits_11b));
-	memcpy(radio_parms->tx_channel_limits_ofdm, tx_channel_limits_ofdm,
-	       sizeof(tx_channel_limits_ofdm));
-	memcpy(radio_parms->tx_pdv_rate_offsets, tx_pdv_rate_offsets,
-	       sizeof(tx_pdv_rate_offsets));
-	memcpy(radio_parms->tx_ibias, tx_ibias,
-	       sizeof(tx_ibias));
-
-	radio_parms->rx_fem_insertion_loss = 0x14;
-
-	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
-	if (ret < 0)
-		wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
-
-	kfree(radio_parms);
-	return ret;
-}
-
 int wl1271_hw_init(struct wl1271 *wl)
 {
 	int ret;
 
-	ret = wl1271_init_general_parms(wl);
+	ret = wl1271_cmd_general_parms(wl);
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_init_radio_parms(wl);
+	ret = wl1271_cmd_radio_parms(wl);
 	if (ret < 0)
 		return ret;
 
@@ -311,8 +217,8 @@ int wl1271_hw_init(struct wl1271 *wl)
 
 	/* RX config */
 	ret = wl1271_init_rx_config(wl,
-				       RX_CFG_PROMISCUOUS | RX_CFG_TSF,
-				       RX_FILTER_OPTION_DEF);
+				    RX_CFG_PROMISCUOUS | RX_CFG_TSF,
+				    RX_FILTER_OPTION_DEF);
 	/* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
 	   RX_FILTER_OPTION_FILTER_ALL); */
 	if (ret < 0)
@@ -323,6 +229,11 @@ int wl1271_hw_init(struct wl1271 *wl)
 	if (ret < 0)
 		goto out_free_memmap;
 
+	/* Initialize connection monitoring thresholds */
+	ret = wl1271_acx_conn_monit_params(wl);
+	if (ret < 0)
+		goto out_free_memmap;
+
 	/* Beacon filtering */
 	ret = wl1271_init_beacon_filter(wl);
 	if (ret < 0)
@@ -369,7 +280,7 @@ int wl1271_hw_init(struct wl1271 *wl)
 		goto out_free_memmap;
 
 	/* Configure TX rate classes */
-	ret = wl1271_acx_rate_policies(wl);
+	ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
 	if (ret < 0)
 		goto out_free_memmap;
 
@@ -388,10 +299,16 @@ int wl1271_hw_init(struct wl1271 *wl)
 	if (ret < 0)
 		goto out_free_memmap;
 
+	/* Configure smart reflex */
+	ret = wl1271_acx_smart_reflex(wl);
+	if (ret < 0)
+		goto out_free_memmap;
+
 	return 0;
 
  out_free_memmap:
 	kfree(wl->target_mem_map);
+	wl->target_mem_map = NULL;
 
 	return ret;
 }
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index bd8ff0fa2272..930677fbe852 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -29,87 +29,4 @@
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
 int wl1271_hw_init(struct wl1271 *wl);
 
-/* These are not really a TEST_CMD, but the ref driver uses them as such */
-#define TEST_CMD_INI_FILE_RADIO_PARAM   0x19
-#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
-
-struct wl1271_general_parms {
-	u8 id;
-	u8 padding[3];
-
-	u8 ref_clk;
-	u8 settling_time;
-	u8 clk_valid_on_wakeup;
-	u8 dc2dcmode;
-	u8 single_dual_band;
-
-	u8 tx_bip_fem_autodetect;
-	u8 tx_bip_fem_manufacturer;
-	u8 settings;
-} __attribute__ ((packed));
-
-enum ref_clk_enum {
-	REF_CLK_19_2_E,
-	REF_CLK_26_E,
-	REF_CLK_38_4_E,
-	REF_CLK_52_E
-};
-
-#define RSSI_AND_PROCESS_COMPENSATION_SIZE 15
-#define NUMBER_OF_SUB_BANDS_5  7
-#define NUMBER_OF_RATE_GROUPS  6
-#define NUMBER_OF_CHANNELS_2_4 14
-#define NUMBER_OF_CHANNELS_5   35
-
-struct wl1271_radio_parms {
-	u8 id;
-	u8 padding[3];
-
-	/* Static radio parameters */
-	/* 2.4GHz */
-	u8 rx_trace_loss;
-	u8 tx_trace_loss;
-	s8 rx_rssi_and_proc_compens[RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
-	/* 5GHz */
-	u8 rx_trace_loss_5[NUMBER_OF_SUB_BANDS_5];
-	u8 tx_trace_loss_5[NUMBER_OF_SUB_BANDS_5];
-	s8 rx_rssi_and_proc_compens_5[RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
-	/* Dynamic radio parameters */
-	/* 2.4GHz */
-	s16 tx_ref_pd_voltage;
-	s8  tx_ref_power;
-	s8  tx_offset_db;
-
-	s8  tx_rate_limits_normal[NUMBER_OF_RATE_GROUPS];
-	s8  tx_rate_limits_degraded[NUMBER_OF_RATE_GROUPS];
-
-	s8  tx_channel_limits_11b[NUMBER_OF_CHANNELS_2_4];
-	s8  tx_channel_limits_ofdm[NUMBER_OF_CHANNELS_2_4];
-	s8  tx_pdv_rate_offsets[NUMBER_OF_RATE_GROUPS];
-
-	u8  tx_ibias[NUMBER_OF_RATE_GROUPS];
-	u8  rx_fem_insertion_loss;
-
-	u8 padding2;
-
-	/* 5GHz */
-	s16 tx_ref_pd_voltage_5[NUMBER_OF_SUB_BANDS_5];
-	s8  tx_ref_power_5[NUMBER_OF_SUB_BANDS_5];
-	s8  tx_offset_db_5[NUMBER_OF_SUB_BANDS_5];
-
-	s8  tx_rate_limits_normal_5[NUMBER_OF_RATE_GROUPS];
-	s8  tx_rate_limits_degraded_5[NUMBER_OF_RATE_GROUPS];
-
-	s8  tx_channel_limits_ofdm_5[NUMBER_OF_CHANNELS_5];
-	s8  tx_pdv_rate_offsets_5[NUMBER_OF_RATE_GROUPS];
-
-	/* FIXME: this is inconsistent with the types for 2.4GHz */
-	s8  tx_ibias_5[NUMBER_OF_RATE_GROUPS];
-	s8  rx_fem_insertion_loss_5[NUMBER_OF_SUB_BANDS_5];
-
-	u8 padding3[2];
-} __attribute__ ((packed));
-
 #endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 27298b19d5bd..b62c00ff42fe 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -30,7 +30,9 @@
 #include <linux/spi/spi.h>
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
 #include <linux/spi/wl12xx.h>
+#include <linux/inetdevice.h>
 
 #include "wl1271.h"
 #include "wl12xx_80211.h"
@@ -45,10 +47,314 @@
 #include "wl1271_cmd.h"
 #include "wl1271_boot.h"
 
+static struct conf_drv_settings default_conf = {
+	.sg = {
+		.per_threshold               = 7500,
+		.max_scan_compensation_time  = 120000,
+		.nfs_sample_interval         = 400,
+		.load_ratio                  = 50,
+		.auto_ps_mode                = 0,
+		.probe_req_compensation      = 170,
+		.scan_window_compensation    = 50,
+		.antenna_config              = 0,
+		.beacon_miss_threshold       = 60,
+		.rate_adaptation_threshold   = CONF_HW_BIT_RATE_12MBPS,
+		.rate_adaptation_snr         = 0
+	},
+	.rx = {
+		.rx_msdu_life_time           = 512000,
+		.packet_detection_threshold  = 0,
+		.ps_poll_timeout             = 15,
+		.upsd_timeout                = 15,
+		.rts_threshold               = 2347,
+		.rx_cca_threshold            = 0xFFEF,
+		.irq_blk_threshold           = 0,
+		.irq_pkt_threshold           = USHORT_MAX,
+		.irq_timeout                 = 5,
+		.queue_type                  = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
+	},
+	.tx = {
+		.tx_energy_detection         = 0,
+		.rc_conf                     = {
+			.enabled_rates       = CONF_TX_RATE_MASK_UNSPECIFIED,
+			.short_retry_limit   = 10,
+			.long_retry_limit    = 10,
+			.aflags              = 0
+		},
+		.ac_conf_count               = 4,
+		.ac_conf                     = {
+			[0] = {
+				.ac          = CONF_TX_AC_BE,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = 3,
+				.tx_op_limit = 0,
+			},
+			[1] = {
+				.ac          = CONF_TX_AC_BK,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = 7,
+				.tx_op_limit = 0,
+			},
+			[2] = {
+				.ac          = CONF_TX_AC_VI,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = CONF_TX_AIFS_PIFS,
+				.tx_op_limit = 3008,
+			},
+			[3] = {
+				.ac          = CONF_TX_AC_VO,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = CONF_TX_AIFS_PIFS,
+				.tx_op_limit = 1504,
+			},
+		},
+		.tid_conf_count = 7,
+		.tid_conf = {
+			[0] = {
+				.queue_id    = 0,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[1] = {
+				.queue_id    = 1,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[2] = {
+				.queue_id    = 2,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[3] = {
+				.queue_id    = 3,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[4] = {
+				.queue_id    = 4,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[5] = {
+				.queue_id    = 5,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[6] = {
+				.queue_id    = 6,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			}
+		},
+		.frag_threshold              = IEEE80211_MAX_FRAG_THRESHOLD,
+		.tx_compl_timeout            = 5,
+		.tx_compl_threshold          = 5
+	},
+	.conn = {
+		.wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
+		.listen_interval             = 0,
+		.bcn_filt_mode               = CONF_BCN_FILT_MODE_ENABLED,
+		.bcn_filt_ie_count           = 1,
+		.bcn_filt_ie = {
+			[0] = {
+				.ie          = WLAN_EID_CHANNEL_SWITCH,
+				.rule        = CONF_BCN_RULE_PASS_ON_APPEARANCE,
+			}
+		},
+		.synch_fail_thold            = 5,
+		.bss_lose_timeout            = 100,
+		.beacon_rx_timeout           = 10000,
+		.broadcast_timeout           = 20000,
+		.rx_broadcast_in_ps          = 1,
+		.ps_poll_threshold           = 4,
+		.sig_trigger_count           = 2,
+		.sig_trigger = {
+			[0] = {
+				.threshold   = -75,
+				.pacing      = 500,
+				.metric      = CONF_TRIG_METRIC_RSSI_BEACON,
+				.type        = CONF_TRIG_EVENT_TYPE_EDGE,
+				.direction   = CONF_TRIG_EVENT_DIR_LOW,
+				.hysteresis  = 2,
+				.index       = 0,
+				.enable      = 1
+			},
+			[1] = {
+				.threshold   = -75,
+				.pacing      = 500,
+				.metric      = CONF_TRIG_METRIC_RSSI_BEACON,
+				.type        = CONF_TRIG_EVENT_TYPE_EDGE,
+				.direction   = CONF_TRIG_EVENT_DIR_HIGH,
+				.hysteresis  = 2,
+				.index       = 1,
+				.enable      = 1
+			}
+		},
+		.sig_weights = {
+			.rssi_bcn_avg_weight = 10,
+			.rssi_pkt_avg_weight = 10,
+			.snr_bcn_avg_weight  = 10,
+			.snr_pkt_avg_weight  = 10
+		},
+		.bet_enable                  = CONF_BET_MODE_ENABLE,
+		.bet_max_consecutive         = 10,
+		.psm_entry_retries           = 3
+	},
+	.init = {
+		.sr_err_tbl = {
+			[0] = {
+				.len         = 7,
+				.upper_limit = 0x03,
+				.values      = {
+					0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
+					0x00 }
+			},
+			[1] = {
+				.len         = 7,
+				.upper_limit = 0x03,
+				.values      = {
+					0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
+					0x00 }
+			},
+			[2] = {
+				.len         = 7,
+				.upper_limit = 0x03,
+				.values      = {
+					0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
+					0x00 }
+			}
+		},
+		.sr_enable                   = 1,
+		.genparam                    = {
+			.ref_clk             = CONF_REF_CLK_38_4_E,
+			.settling_time       = 5,
+			.clk_valid_on_wakeup = 0,
+			.dc2dcmode           = 0,
+			.single_dual_band    = CONF_SINGLE_BAND,
+			.tx_bip_fem_autodetect = 0,
+			.tx_bip_fem_manufacturer = 1,
+			.settings = 1,
+		},
+		.radioparam = {
+			.rx_trace_loss       = 10,
+			.tx_trace_loss       = 10,
+			.rx_rssi_and_proc_compens = {
+				0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
+				0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
+				0x00, 0x0a, 0x14 },
+			.rx_trace_loss_5     = { 0, 0, 0, 0, 0, 0, 0 },
+			.tx_trace_loss_5     = { 0, 0, 0, 0, 0, 0, 0 },
+			.rx_rssi_and_proc_compens_5 = {
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x00 },
+			.tx_ref_pd_voltage   = 0x24e,
+			.tx_ref_power        = 0x78,
+			.tx_offset_db        = 0x0,
+			.tx_rate_limits_normal = {
+				0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
+			.tx_rate_limits_degraded = {
+				0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
+			.tx_channel_limits_11b = {
+				0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
+				0x22, 0x50 },
+			.tx_channel_limits_ofdm = {
+				0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
+				0x20, 0x50 },
+			.tx_pdv_rate_offsets = {
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+			.tx_ibias            = {
+				0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
+			.rx_fem_insertion_loss = 0x14,
+			.tx_ref_pd_voltage_5 = {
+				0x0190, 0x01a4, 0x01c3, 0x01d8,
+				0x020a, 0x021c },
+			.tx_ref_power_5      = {
+				0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
+			.tx_offset_db_5      = {
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+			.tx_rate_limits_normal_5 = {
+				0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
+			.tx_rate_limits_degraded_5 = {
+				0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
+			.tx_channel_limits_ofdm_5 = {
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50 },
+			.tx_pdv_rate_offsets_5 = {
+				0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
+			.tx_ibias_5          = {
+				0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
+			.rx_fem_insertion_loss_5 = {
+				0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
+		}
+	}
+};
+
+static LIST_HEAD(wl_list);
+
+static void wl1271_conf_init(struct wl1271 *wl)
+{
+
+	/*
+	 * This function applies the default configuration to the driver; it
+	 * is invoked upon driver load (spi probe).
+	 *
+	 * The configuration is stored in a run-time structure in order to
+	 * facilitate run-time adjustment of any of the parameters. Making
+	 * changes to the configuration structure will apply the new values
+	 * on the next interface up (wl1271_op_start).
+	 */
+
+	/* apply driver default configuration */
+	memcpy(&wl->conf, &default_conf, sizeof(default_conf));
+
+	if (wl1271_11a_enabled())
+		wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
+}
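Since the defaults are copied into wl->conf rather than used as compile-time constants, individual parameters can be adjusted after wl1271_conf_init() and are picked up on the next interface up. A hypothetical sketch (the helper name and chosen values are made up; the fields come from the conf structures earlier in this patch):

static void wl1271_conf_tweak_example(struct wl1271 *wl)
{
	/* allow a couple more PSM entry attempts before giving up */
	wl->conf.conn.psm_entry_retries = 5;

	/* lower the RTS threshold from its 2347 default */
	wl->conf.rx.rts_threshold = 1500;

	/* both changes take effect on the next wl1271_op_start() */
}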
+
+
 static int wl1271_plt_init(struct wl1271 *wl)
 {
 	int ret;
 
+	ret = wl1271_cmd_general_parms(wl);
+	if (ret < 0)
+		return ret;
+
+	ret = wl1271_cmd_radio_parms(wl);
+	if (ret < 0)
+		return ret;
+
 	ret = wl1271_acx_init_mem_config(wl);
 	if (ret < 0)
 		return ret;
@@ -75,20 +381,14 @@ static void wl1271_power_on(struct wl1271 *wl)
 	wl->set_power(true);
 }
 
-static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
+static void wl1271_fw_status(struct wl1271 *wl,
+			     struct wl1271_fw_status *status)
 {
 	u32 total = 0;
 	int i;
 
-	/*
-	 * FIXME: Reading the FW status directly from the registers seems to
-	 * be the right thing to do, but it doesn't work.  And in the
-	 * reference driver, there is a workaround called
-	 * USE_SDIO_24M_WORKAROUND, which reads the status from memory
-	 * instead, so we do the same here.
-	 */
-
-	wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status));
+	wl1271_spi_read(wl, FW_STATUS_ADDR, status,
+			sizeof(*status), false);
 
 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
 		     "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -99,25 +399,28 @@ static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
 
 	/* update number of available TX blocks */
 	for (i = 0; i < NUM_TX_QUEUES; i++) {
-		u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i];
-		wl->tx_blocks_freed[i] = status->tx_released_blks[i];
+		u32 cnt = le32_to_cpu(status->tx_released_blks[i]) -
+			wl->tx_blocks_freed[i];
+
+		wl->tx_blocks_freed[i] =
+			le32_to_cpu(status->tx_released_blks[i]);
 		wl->tx_blocks_available += cnt;
 		total += cnt;
 	}
 
 	/* if more blocks are available now, schedule some tx work */
 	if (total && !skb_queue_empty(&wl->tx_queue))
-		schedule_work(&wl->tx_work);
+		ieee80211_queue_work(wl->hw, &wl->tx_work);
 
 	/* update the host-chipset time offset */
-	wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime;
+	wl->time_offset = jiffies_to_usecs(jiffies) -
+		le32_to_cpu(status->fw_localtime);
 }
 
-#define WL1271_IRQ_MAX_LOOPS 10
 static void wl1271_irq_work(struct work_struct *work)
 {
-	u32 intr, ctr = WL1271_IRQ_MAX_LOOPS;
 	int ret;
+	u32 intr;
 	struct wl1271 *wl =
 		container_of(work, struct wl1271, irq_work);
 
@@ -132,9 +435,10 @@ static void wl1271_irq_work(struct work_struct *work)
 	if (ret < 0)
 		goto out;
 
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
 
-	intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
+	wl1271_fw_status(wl, wl->fw_status);
+	intr = le32_to_cpu(wl->fw_status->intr);
 	if (!intr) {
 		wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
 		goto out_sleep;
@@ -142,46 +446,39 @@ static void wl1271_irq_work(struct work_struct *work)
 
 	intr &= WL1271_INTR_MASK;
 
-	do {
-		wl1271_fw_status(wl, wl->fw_status);
-
-
-		if (intr & (WL1271_ACX_INTR_EVENT_A |
-			    WL1271_ACX_INTR_EVENT_B)) {
-			wl1271_debug(DEBUG_IRQ,
-				     "WL1271_ACX_INTR_EVENT (0x%x)", intr);
-			if (intr & WL1271_ACX_INTR_EVENT_A)
-				wl1271_event_handle(wl, 0);
-			else
-				wl1271_event_handle(wl, 1);
-		}
+	if (intr & WL1271_ACX_INTR_EVENT_A) {
+		bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
+		wl1271_event_handle(wl, 0, do_ack);
+	}
 
-		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
-			wl1271_debug(DEBUG_IRQ,
-				     "WL1271_ACX_INTR_INIT_COMPLETE");
+	if (intr & WL1271_ACX_INTR_EVENT_B) {
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
+		wl1271_event_handle(wl, 1, true);
+	}
 
-		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
-			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
+	if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
+		wl1271_debug(DEBUG_IRQ,
+			     "WL1271_ACX_INTR_INIT_COMPLETE");
 
-		if (intr & WL1271_ACX_INTR_DATA) {
-			u8 tx_res_cnt = wl->fw_status->tx_results_counter -
-				wl->tx_results_count;
+	if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
 
-			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
+	if (intr & WL1271_ACX_INTR_DATA) {
+		u8 tx_res_cnt = wl->fw_status->tx_results_counter -
+			wl->tx_results_count;
 
-			/* check for tx results */
-			if (tx_res_cnt)
-				wl1271_tx_complete(wl, tx_res_cnt);
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
-			wl1271_rx(wl, wl->fw_status);
-		}
+		/* check for tx results */
+		if (tx_res_cnt)
+			wl1271_tx_complete(wl, tx_res_cnt);
 
-		intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
-		intr &= WL1271_INTR_MASK;
-	} while (intr && --ctr);
+		wl1271_rx(wl, wl->fw_status);
+	}
 
 out_sleep:
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
 			   WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
 	wl1271_ps_elp_sleep(wl);
 
@@ -205,7 +502,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
 		wl->elp_compl = NULL;
 	}
 
-	schedule_work(&wl->irq_work);
+	ieee80211_queue_work(wl->hw, &wl->irq_work);
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
 	return IRQ_HANDLED;
@@ -231,7 +528,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
 	}
 
 	wl->fw_len = fw->size;
-	wl->fw = kmalloc(wl->fw_len, GFP_KERNEL);
+	wl->fw = vmalloc(wl->fw_len);
 
 	if (!wl->fw) {
 		wl1271_error("could not allocate memory for the firmware");
@@ -292,7 +589,7 @@ static void wl1271_fw_wakeup(struct wl1271 *wl)
 	u32 elp_reg;
 
 	elp_reg = ELPCTRL_WAKE_UP;
-	wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
 }
 
 static int wl1271_setup(struct wl1271 *wl)
@@ -314,6 +611,7 @@ static int wl1271_setup(struct wl1271 *wl)
 
 static int wl1271_chip_wakeup(struct wl1271 *wl)
 {
+	struct wl1271_partition_set partition;
 	int ret = 0;
 
 	wl1271_power_on(wl);
@@ -323,11 +621,10 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
 
 	/* We don't need a real memory partition here, because we only want
 	 * to use the registers at this point. */
-	wl1271_set_partition(wl,
-			     0x00000000,
-			     0x00000000,
-			     REGISTERS_BASE,
-			     REGISTERS_DOWN_SIZE);
+	memset(&partition, 0, sizeof(partition));
+	partition.reg.start = REGISTERS_BASE;
+	partition.reg.size = REGISTERS_DOWN_SIZE;
+	wl1271_set_partition(wl, &partition);
 
 	/* ELP module wake up */
 	wl1271_fw_wakeup(wl);
@@ -335,7 +632,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
 	/* whal_FwCtrl_BootSm() */
 
 	/* 0. read chip id from CHIP_ID */
-	wl->chip.id = wl1271_reg_read32(wl, CHIP_ID_B);
+	wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
 
 	/* 1. check if chip id is valid */
 
@@ -346,7 +643,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
 
 		ret = wl1271_setup(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 		break;
 	case CHIP_ID_1271_PG20:
 		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -354,56 +651,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
 
 		ret = wl1271_setup(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 		break;
 	default:
 		wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
 		ret = -ENODEV;
-		goto out;
+		goto out_power_off;
 	}
 
 	if (wl->fw == NULL) {
 		ret = wl1271_fetch_firmware(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 	}
 
 	/* No NVS from netlink, try to get it from the filesystem */
 	if (wl->nvs == NULL) {
 		ret = wl1271_fetch_nvs(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 	}
 
-out:
-	return ret;
-}
-
-static void wl1271_filter_work(struct work_struct *work)
-{
-	struct wl1271 *wl =
-		container_of(work, struct wl1271, filter_work);
-	int ret;
-
-	mutex_lock(&wl->mutex);
-
-	if (wl->state == WL1271_STATE_OFF)
-		goto out;
-
-	ret = wl1271_ps_elp_wakeup(wl, false);
-	if (ret < 0)
-		goto out;
-
-	/* FIXME: replace the magic numbers with proper definitions */
-	ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
-	if (ret < 0)
-		goto out_sleep;
+	goto out;
 
-out_sleep:
-	wl1271_ps_elp_sleep(wl);
+out_power_off:
+	wl1271_power_off(wl);
 
 out:
-	mutex_unlock(&wl->mutex);
+	return ret;
 }
 
 int wl1271_plt_start(struct wl1271 *wl)
@@ -429,13 +704,26 @@ int wl1271_plt_start(struct wl1271 *wl)
 
 	ret = wl1271_boot(wl);
 	if (ret < 0)
-		goto out;
+		goto out_power_off;
 
 	wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
 
 	ret = wl1271_plt_init(wl);
 	if (ret < 0)
-		goto out;
+		goto out_irq_disable;
+
+	/* Make sure power saving is disabled */
+	ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+	if (ret < 0)
+		goto out_irq_disable;
+
+	goto out;
+
+out_irq_disable:
+	wl1271_disable_interrupts(wl);
+
+out_power_off:
+	wl1271_power_off(wl);
 
 out:
 	mutex_unlock(&wl->mutex);
@@ -462,6 +750,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
 	wl1271_power_off(wl);
 
 	wl->state = WL1271_STATE_OFF;
+	wl->rx_counter = 0;
 
 out:
 	mutex_unlock(&wl->mutex);
@@ -481,7 +770,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	 * before that, the tx_work will not be initialized!
 	 */
 
-	schedule_work(&wl->tx_work);
+	ieee80211_queue_work(wl->hw, &wl->tx_work);
 
 	/*
 	 * The workqueue is slow to process the tx_queue and we need stop
@@ -501,6 +790,93 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	return NETDEV_TX_OK;
 }
 
+static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
+			     void *arg)
+{
+	struct net_device *dev;
+	struct wireless_dev *wdev;
+	struct wiphy *wiphy;
+	struct ieee80211_hw *hw;
+	struct wl1271 *wl;
+	struct wl1271 *wl_temp;
+	struct in_device *idev;
+	struct in_ifaddr *ifa = arg;
+	int ret = 0;
+
+	/* FIXME: this ugly function should probably be implemented in
+	 * mac80211, leaving here only a simple callback that handles the
+	 * actual setting of the filters. As it stands, we need to dig up
+	 * references to various structures to gain access to what we need.
+	 * Also, because of this, there is no "initial" setting of the filter
+	 * in "op_start", because we don't want to dig up struct net_device
+	 * there - the filter will be set upon the first change of the
+	 * interface IP address. */
+
+	dev = ifa->ifa_dev->dev;
+
+	wdev = dev->ieee80211_ptr;
+	if (wdev == NULL)
+		return NOTIFY_DONE;
+
+	wiphy = wdev->wiphy;
+	if (wiphy == NULL)
+		return NOTIFY_DONE;
+
+	hw = wiphy_priv(wiphy);
+	if (hw == NULL)
+		return NOTIFY_DONE;
+
+	/* Check that the interface is one supported by this driver. */
+	wl_temp = hw->priv;
+	list_for_each_entry(wl, &wl_list, list) {
+		if (wl == wl_temp)
+			break;
+	}
+	if (wl == NULL)
+		return NOTIFY_DONE;
+
+	/* Get the interface IP address for the device. "ifa" will become
+	   NULL if:
+	     - there is no IPV4 protocol address configured
+	     - there are multiple (virtual) IPV4 addresses configured
+	   When "ifa" is NULL, filtering will be disabled.
+	*/
+	ifa = NULL;
+	idev = dev->ip_ptr;
+	if (idev)
+		ifa = idev->ifa_list;
+
+	if (ifa && ifa->ifa_next)
+		ifa = NULL;
+
+	mutex_lock(&wl->mutex);
+
+	if (wl->state == WL1271_STATE_OFF)
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl, false);
+	if (ret < 0)
+		goto out;
+	if (ifa)
+		ret = wl1271_acx_arp_ip_filter(wl, true,
+					       (u8 *)&ifa->ifa_address,
+					       ACX_IPV4_VERSION);
+	else
+		ret = wl1271_acx_arp_ip_filter(wl, false, NULL,
+					       ACX_IPV4_VERSION);
+	wl1271_ps_elp_sleep(wl);
+
+out:
+	mutex_unlock(&wl->mutex);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wl1271_dev_notifier = {
+	.notifier_call = wl1271_dev_notify,
+};
+
+
 static int wl1271_op_start(struct ieee80211_hw *hw)
 {
 	struct wl1271 *wl = hw->priv;
@@ -523,22 +899,32 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
 
 	ret = wl1271_boot(wl);
 	if (ret < 0)
-		goto out;
+		goto out_power_off;
 
 	ret = wl1271_hw_init(wl);
 	if (ret < 0)
-		goto out;
+		goto out_irq_disable;
 
 	wl->state = WL1271_STATE_ON;
 
 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
 
-out:
-	if (ret < 0)
-		wl1271_power_off(wl);
+	goto out;
+
+out_irq_disable:
+	wl1271_disable_interrupts(wl);
+
+out_power_off:
+	wl1271_power_off(wl);
 
+out:
 	mutex_unlock(&wl->mutex);
 
+	if (!ret) {
+		list_add(&wl->list, &wl_list);
+		register_inetaddr_notifier(&wl1271_dev_notifier);
+	}
+
 	return ret;
 }
 
@@ -551,6 +937,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
 
+	unregister_inetaddr_notifier(&wl1271_dev_notifier);
+	list_del(&wl->list);
+
 	mutex_lock(&wl->mutex);
 
 	WARN_ON(wl->state != WL1271_STATE_ON);
@@ -570,7 +959,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
 
 	cancel_work_sync(&wl->irq_work);
 	cancel_work_sync(&wl->tx_work);
-	cancel_work_sync(&wl->filter_work);
 
 	mutex_lock(&wl->mutex);
 
@@ -581,19 +969,25 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
 	memset(wl->bssid, 0, ETH_ALEN);
 	memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
 	wl->ssid_len = 0;
-	wl->listen_int = 1;
 	wl->bss_type = MAX_BSS_TYPE;
+	wl->band = IEEE80211_BAND_2GHZ;
 
 	wl->rx_counter = 0;
 	wl->elp = false;
 	wl->psm = 0;
+	wl->psm_entry_retry = 0;
 	wl->tx_queue_stopped = false;
 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
 	wl->tx_blocks_available = 0;
 	wl->tx_results_count = 0;
 	wl->tx_packets_count = 0;
+	wl->tx_security_last_seq = 0;
+	wl->tx_security_seq_16 = 0;
+	wl->tx_security_seq_32 = 0;
 	wl->time_offset = 0;
 	wl->session_counter = 0;
+	wl->joined = false;
+
 	for (i = 0; i < NUM_TX_QUEUES; i++)
 		wl->tx_blocks_freed[i] = 0;
 
@@ -611,6 +1005,12 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
 		     conf->type, conf->mac_addr);
 
 	mutex_lock(&wl->mutex);
+	if (wl->vif) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	wl->vif = conf->vif;
 
 	switch (conf->type) {
 	case NL80211_IFTYPE_STATION:
@@ -634,7 +1034,12 @@ out:
 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
 					 struct ieee80211_if_init_conf *conf)
 {
+	struct wl1271 *wl = hw->priv;
+
+	mutex_lock(&wl->mutex);
 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+	wl->vif = NULL;
+	mutex_unlock(&wl->mutex);
 }
 
 #if 0
@@ -657,23 +1062,24 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
 	if (ret < 0)
 		goto out;
 
-	memcpy(wl->bssid, conf->bssid, ETH_ALEN);
+	if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
+		wl1271_debug(DEBUG_MAC80211, "bssid changed");
 
-	ret = wl1271_cmd_build_null_data(wl);
-	if (ret < 0)
-		goto out_sleep;
+		memcpy(wl->bssid, conf->bssid, ETH_ALEN);
 
-	wl->ssid_len = conf->ssid_len;
-	if (wl->ssid_len)
-		memcpy(wl->ssid, conf->ssid, wl->ssid_len);
+		ret = wl1271_cmd_join(wl);
+		if (ret < 0)
+			goto out_sleep;
 
-	if (wl->bss_type != BSS_TYPE_IBSS) {
-		/* FIXME: replace the magic numbers with proper definitions */
-		ret = wl1271_cmd_join(wl, wl->bss_type, 5, 100, 1);
+		ret = wl1271_cmd_build_null_data(wl);
 		if (ret < 0)
 			goto out_sleep;
 	}
 
+	wl->ssid_len = conf->ssid_len;
+	if (wl->ssid_len)
+		memcpy(wl->ssid, conf->ssid, wl->ssid_len);
+
 	if (conf->changed & IEEE80211_IFCC_BEACON) {
 		beacon = ieee80211_beacon_get(hw, vif);
 		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
@@ -691,12 +1097,6 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
 
 		if (ret < 0)
 			goto out_sleep;
-
-		/* FIXME: replace the magic numbers with proper definitions */
-		ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
-
-		if (ret < 0)
-			goto out_sleep;
 	}
 
 out_sleep:
@@ -724,26 +1124,22 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 
 	mutex_lock(&wl->mutex);
 
+	wl->band = conf->channel->band;
+
 	ret = wl1271_ps_elp_wakeup(wl, false);
 	if (ret < 0)
 		goto out;
 
 	if (channel != wl->channel) {
-		u8 old_channel = wl->channel;
+		/*
+		 * We assume that the stack will configure the right channel
+		 * before associating, so we don't need to send a join
+		 * command here.  We will join the right channel when the
+		 * BSSID changes.
+		 */
 		wl->channel = channel;
-
-		/* FIXME: use beacon interval provided by mac80211 */
-		ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
-		if (ret < 0) {
-			wl->channel = old_channel;
-			goto out_sleep;
-		}
 	}
 
-	ret = wl1271_cmd_build_null_data(wl);
-	if (ret < 0)
-		goto out_sleep;
-
 	if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
 		wl1271_info("psm enabled");
 
@@ -768,7 +1164,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 	if (conf->power_level != wl->power_level) {
 		ret = wl1271_acx_tx_power(wl, conf->power_level);
 		if (ret < 0)
-			goto out;
+			goto out_sleep;
 
 		wl->power_level = conf->power_level;
 	}
@@ -782,6 +1178,45 @@ out:
 	return ret;
 }
 
+struct wl1271_filter_params {
+	bool enabled;
+	int mc_list_length;
+	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
+};
+
+static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
+				       struct dev_addr_list *mc_list)
+{
+	struct wl1271_filter_params *fp;
+	int i;
+
+	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+	if (!fp) {
+		wl1271_error("Out of memory setting filters.");
+		return 0;
+	}
+
+	/* update multicast filtering parameters */
+	fp->enabled = true;
+	if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
+		mc_count = 0;
+		fp->enabled = false;
+	}
+
+	fp->mc_list_length = 0;
+	for (i = 0; i < mc_count; i++) {
+		if (mc_list->da_addrlen == ETH_ALEN) {
+			memcpy(fp->mc_list[fp->mc_list_length],
+			       mc_list->da_addr, ETH_ALEN);
+			fp->mc_list_length++;
+		} else
+			wl1271_warning("Unknown mc address length.");
+		mc_list = mc_list->next;
+	}
+
+	return (u64)(unsigned long)fp;
+}
+
 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
 				  FIF_ALLMULTI | \
 				  FIF_FCSFAIL | \
@@ -791,28 +1226,53 @@ out:
 
 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
 				       unsigned int changed,
-				       unsigned int *total,u64 multicast)
+				       unsigned int *total, u64 multicast)
 {
+	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
 	struct wl1271 *wl = hw->priv;
+	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
 
+	mutex_lock(&wl->mutex);
+
+	if (wl->state == WL1271_STATE_OFF)
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl, false);
+	if (ret < 0)
+		goto out;
+
 	*total &= WL1271_SUPPORTED_FILTERS;
 	changed &= WL1271_SUPPORTED_FILTERS;
 
+	if (*total & FIF_ALLMULTI)
+		ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+	else if (fp)
+		ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+						   fp->mc_list,
+						   fp->mc_list_length);
+	if (ret < 0)
+		goto out_sleep;
+
+	kfree(fp);
+
+	/* FIXME: We still need to set our filters properly */
+
+	/* determine whether the supported filter values have changed */
 	if (changed == 0)
-		return;
+		goto out_sleep;
 
-	/* FIXME: wl->rx_config and wl->rx_filter are not protected */
-	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+	/* apply configured filters */
+	ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+	if (ret < 0)
+		goto out_sleep;
 
-	/*
-	 * FIXME: workqueues need to be properly cancelled on stop(), for
-	 * now let's just disable changing the filter settings. They will
-	 * be updated any on config().
-	 */
-	/* schedule_work(&wl->filter_work); */
+out_sleep:
+	wl1271_ps_elp_sleep(wl);
+
+out:
+	mutex_unlock(&wl->mutex);
 }
 
 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -823,6 +1283,8 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	struct wl1271 *wl = hw->priv;
 	const u8 *addr;
 	int ret;
+	u32 tx_seq_32 = 0;
+	u16 tx_seq_16 = 0;
 	u8 key_type;
 
 	static const u8 bcast_addr[ETH_ALEN] =
@@ -861,11 +1323,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		key_type = KEY_TKIP;
 
 		key_conf->hw_key_idx = key_conf->keyidx;
+		tx_seq_32 = wl->tx_security_seq_32;
+		tx_seq_16 = wl->tx_security_seq_16;
 		break;
 	case ALG_CCMP:
 		key_type = KEY_AES;
 
 		key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		tx_seq_32 = wl->tx_security_seq_32;
+		tx_seq_16 = wl->tx_security_seq_16;
 		break;
 	default:
 		wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -879,7 +1345,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
 					 key_conf->keyidx, key_type,
 					 key_conf->keylen, key_conf->key,
-					 addr);
+					 addr, tx_seq_32, tx_seq_16);
 		if (ret < 0) {
 			wl1271_error("Could not add or replace key");
 			goto out_sleep;
@@ -890,7 +1356,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
 					 key_conf->keyidx, key_type,
 					 key_conf->keylen, key_conf->key,
-					 addr);
+					 addr, 0, 0);
 		if (ret < 0) {
 			wl1271_error("Could not remove key");
 			goto out_sleep;
@@ -921,13 +1387,13 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 	struct wl1271 *wl = hw->priv;
 	int ret;
 	u8 *ssid = NULL;
-	size_t ssid_len = 0;
+	size_t len = 0;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
 
 	if (req->n_ssids) {
 		ssid = req->ssids[0].ssid;
-		ssid_len = req->ssids[0].ssid_len;
+		len = req->ssids[0].ssid_len;
 	}
 
 	mutex_lock(&wl->mutex);
@@ -936,7 +1402,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_cmd_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
+	if (wl1271_11a_enabled())
+		ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+				      WL1271_SCAN_BAND_DUAL, 3);
+	else
+		ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+				      WL1271_SCAN_BAND_2_4_GHZ, 3);
 
 	wl1271_ps_elp_sleep(wl);
 
@@ -969,6 +1440,22 @@ out:
 	return ret;
 }
 
+static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
+{
+	struct ieee80211_supported_band *band;
+	u32 enabled_rates = 0;
+	int bit;
+
+	band = wl->hw->wiphy->bands[wl->band];
+	for (bit = 0; bit < band->n_bitrates; bit++) {
+		if (basic_rate_set & 0x1)
+			enabled_rates |= band->bitrates[bit].hw_value;
+		basic_rate_set >>= 1;
+	}
+
+	return enabled_rates;
+}
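As a worked example, with the 2.4 GHz rate table a basic_rate_set of 0x5 (bits 0 and 2, i.e. 1 Mbps and 5.5 Mbps) yields CONF_HW_BIT_RATE_1MBPS | CONF_HW_BIT_RATE_5_5MBPS. A minimal user-space analogue of the bit-walk, for illustration only (the numeric hw_values are the ones the driver table used before this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hw_values of the first four 2.4 GHz rates: 1, 2, 5.5, 11 Mbps */
	const uint32_t hw_value[] = { 0x1, 0x2, 0x4, 0x20 };
	uint64_t basic_rate_set = 0x5;	/* bits 0 and 2 set */
	uint32_t enabled_rates = 0;
	int bit;

	for (bit = 0; bit < 4; bit++) {
		if (basic_rate_set & 0x1)
			enabled_rates |= hw_value[bit];
		basic_rate_set >>= 1;
	}

	printf("enabled_rates = 0x%x\n", enabled_rates);	/* prints 0x5 */
	return 0;
}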
+
 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_bss_conf *bss_conf,
@@ -990,6 +1477,12 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 		if (bss_conf->assoc) {
 			wl->aid = bss_conf->aid;
 
+			/*
+			 * With wl1271, we don't need to update the
+			 * beacon_int and dtim_period, because the firmware
+			 * updates them by itself when the first beacon is
+			 * received after a join.
+			 */
 			ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
 			if (ret < 0)
 				goto out_sleep;
@@ -1005,8 +1498,14 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 				if (ret < 0)
 					goto out_sleep;
 			}
+		} else {
+			/* use defaults when not associated */
+			wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+			wl->aid = 0;
 		}
+
 	}
+
 	if (changed & BSS_CHANGED_ERP_SLOT) {
 		if (bss_conf->use_short_slot)
 			ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
@@ -1036,6 +1535,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 		}
 	}
 
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		wl->basic_rate_set = wl1271_enabled_rates_get(
+			wl, bss_conf->basic_rates);
+
+		ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
+		if (ret < 0) {
+			wl1271_warning("Set rate policies failed %d", ret);
+			goto out_sleep;
+		}
+	}
+
 out_sleep:
 	wl1271_ps_elp_sleep(wl);
 
@@ -1047,44 +1557,44 @@ out:
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_rate wl1271_rates[] = {
 	{ .bitrate = 10,
-	  .hw_value = 0x1,
-	  .hw_value_short = 0x1, },
+	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
 	{ .bitrate = 20,
-	  .hw_value = 0x2,
-	  .hw_value_short = 0x2,
+	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 55,
-	  .hw_value = 0x4,
-	  .hw_value_short = 0x4,
+	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 110,
-	  .hw_value = 0x20,
-	  .hw_value_short = 0x20,
+	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 60,
-	  .hw_value = 0x8,
-	  .hw_value_short = 0x8, },
+	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
 	{ .bitrate = 90,
-	  .hw_value = 0x10,
-	  .hw_value_short = 0x10, },
+	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
 	{ .bitrate = 120,
-	  .hw_value = 0x40,
-	  .hw_value_short = 0x40, },
+	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
 	{ .bitrate = 180,
-	  .hw_value = 0x80,
-	  .hw_value_short = 0x80, },
+	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
 	{ .bitrate = 240,
-	  .hw_value = 0x200,
-	  .hw_value_short = 0x200, },
+	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
 	{ .bitrate = 360,
-	 .hw_value = 0x400,
-	 .hw_value_short = 0x400, },
+	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
+	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
 	{ .bitrate = 480,
-	  .hw_value = 0x800,
-	  .hw_value_short = 0x800, },
+	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
 	{ .bitrate = 540,
-	  .hw_value = 0x1000,
-	  .hw_value_short = 0x1000, },
+	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
 };
 
 /* can't be const, mac80211 writes to this */
@@ -1112,6 +1622,88 @@ static struct ieee80211_supported_band wl1271_band_2ghz = {
 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
 };
 
+/* 5 GHz data rates for WL1273 */
+static struct ieee80211_rate wl1271_rates_5ghz[] = {
+	{ .bitrate = 60,
+	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
+	{ .bitrate = 90,
+	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
+	{ .bitrate = 120,
+	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
+	{ .bitrate = 180,
+	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
+	{ .bitrate = 240,
+	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
+	{ .bitrate = 360,
+	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
+	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
+	{ .bitrate = 480,
+	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
+	{ .bitrate = 540,
+	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
+};
+
+/* 5 GHz band channels for WL1273 */
+static struct ieee80211_channel wl1271_channels_5ghz[] = {
+	{ .hw_value = 183, .center_freq = 4915},
+	{ .hw_value = 184, .center_freq = 4920},
+	{ .hw_value = 185, .center_freq = 4925},
+	{ .hw_value = 187, .center_freq = 4935},
+	{ .hw_value = 188, .center_freq = 4940},
+	{ .hw_value = 189, .center_freq = 4945},
+	{ .hw_value = 192, .center_freq = 4960},
+	{ .hw_value = 196, .center_freq = 4980},
+	{ .hw_value = 7, .center_freq = 5035},
+	{ .hw_value = 8, .center_freq = 5040},
+	{ .hw_value = 9, .center_freq = 5045},
+	{ .hw_value = 11, .center_freq = 5055},
+	{ .hw_value = 12, .center_freq = 5060},
+	{ .hw_value = 16, .center_freq = 5080},
+	{ .hw_value = 34, .center_freq = 5170},
+	{ .hw_value = 36, .center_freq = 5180},
+	{ .hw_value = 38, .center_freq = 5190},
+	{ .hw_value = 40, .center_freq = 5200},
+	{ .hw_value = 42, .center_freq = 5210},
+	{ .hw_value = 44, .center_freq = 5220},
+	{ .hw_value = 46, .center_freq = 5230},
+	{ .hw_value = 48, .center_freq = 5240},
+	{ .hw_value = 52, .center_freq = 5260},
+	{ .hw_value = 56, .center_freq = 5280},
+	{ .hw_value = 60, .center_freq = 5300},
+	{ .hw_value = 64, .center_freq = 5320},
+	{ .hw_value = 100, .center_freq = 5500},
+	{ .hw_value = 104, .center_freq = 5520},
+	{ .hw_value = 108, .center_freq = 5540},
+	{ .hw_value = 112, .center_freq = 5560},
+	{ .hw_value = 116, .center_freq = 5580},
+	{ .hw_value = 120, .center_freq = 5600},
+	{ .hw_value = 124, .center_freq = 5620},
+	{ .hw_value = 128, .center_freq = 5640},
+	{ .hw_value = 132, .center_freq = 5660},
+	{ .hw_value = 136, .center_freq = 5680},
+	{ .hw_value = 140, .center_freq = 5700},
+	{ .hw_value = 149, .center_freq = 5745},
+	{ .hw_value = 153, .center_freq = 5765},
+	{ .hw_value = 157, .center_freq = 5785},
+	{ .hw_value = 161, .center_freq = 5805},
+	{ .hw_value = 165, .center_freq = 5825},
+};
+
+
+static struct ieee80211_supported_band wl1271_band_5ghz = {
+	.channels = wl1271_channels_5ghz,
+	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
+	.bitrates = wl1271_rates_5ghz,
+	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
+};
+
 static const struct ieee80211_ops wl1271_ops = {
 	.start = wl1271_op_start,
 	.stop = wl1271_op_stop,
@@ -1119,6 +1711,7 @@ static const struct ieee80211_ops wl1271_ops = {
 	.remove_interface = wl1271_op_remove_interface,
 	.config = wl1271_op_config,
 /* 	.config_interface = wl1271_op_config_interface, */
+	.prepare_multicast = wl1271_op_prepare_multicast,
 	.configure_filter = wl1271_op_configure_filter,
 	.tx = wl1271_op_tx,
 	.set_key = wl1271_op_set_key,
@@ -1151,24 +1744,26 @@ static int wl1271_register_hw(struct wl1271 *wl)
 
 static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
-	/*
-	 * The tx descriptor buffer and the TKIP space.
-	 *
-	 * FIXME: add correct 1271 descriptor size
-	 */
-	wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE;
+	/* The tx descriptor buffer and the TKIP space. */
+	wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
+		sizeof(struct wl1271_tx_hw_descr);
 
 	/* unit us */
 	/* FIXME: find a proper value */
 	wl->hw->channel_change_time = 10000;
 
 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_NOISE_DBM;
+		IEEE80211_HW_NOISE_DBM |
+		IEEE80211_HW_BEACON_FILTER |
+		IEEE80211_HW_SUPPORTS_PS;
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 	wl->hw->wiphy->max_scan_ssids = 1;
 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
 
+	if (wl1271_11a_enabled())
+		wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
 	SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
 
 	return 0;
@@ -1213,29 +1808,33 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 	wl = hw->priv;
 	memset(wl, 0, sizeof(*wl));
 
+	INIT_LIST_HEAD(&wl->list);
+
 	wl->hw = hw;
 	dev_set_drvdata(&spi->dev, wl);
 	wl->spi = spi;
 
 	skb_queue_head_init(&wl->tx_queue);
 
-	INIT_WORK(&wl->filter_work, wl1271_filter_work);
+	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
 	wl->channel = WL1271_DEFAULT_CHANNEL;
 	wl->scanning = false;
 	wl->default_key = 0;
-	wl->listen_int = 1;
 	wl->rx_counter = 0;
 	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
 	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
 	wl->elp = false;
 	wl->psm = 0;
 	wl->psm_requested = false;
+	wl->psm_entry_retry = 0;
 	wl->tx_queue_stopped = false;
 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+	wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+	wl->band = IEEE80211_BAND_2GHZ;
+	wl->vif = NULL;
+	wl->joined = false;
 
-	/* We use the default power on sleep time until we know which chip
-	 * we're using */
-	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		wl->tx_frames[i] = NULL;
 
 	spin_lock_init(&wl->wl_lock);
@@ -1250,13 +1849,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 	wl->state = WL1271_STATE_OFF;
 	mutex_init(&wl->mutex);
 
-	wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
-	if (!wl->rx_descriptor) {
-		wl1271_error("could not allocate memory for rx descriptor");
-		ret = -ENOMEM;
-		goto out_free;
-	}
-
 	/* This is the only SPI value that we need to set here, the rest
 	 * comes from the board-peripherals file */
 	spi->bits_per_word = 32;
@@ -1298,6 +1890,9 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 	}
 	dev_set_drvdata(&wl1271_device.dev, wl);
 
+	/* Apply default driver configuration. */
+	wl1271_conf_init(wl);
+
 	ret = wl1271_init_ieee80211(wl);
 	if (ret)
 		goto out_platform;
@@ -1319,9 +1914,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 	free_irq(wl->irq, wl);
 
  out_free:
-	kfree(wl->rx_descriptor);
-	wl->rx_descriptor = NULL;
-
 	ieee80211_free_hw(hw);
 
 	return ret;
@@ -1337,14 +1929,11 @@ static int __devexit wl1271_remove(struct spi_device *spi)
 	platform_device_unregister(&wl1271_device);
 	free_irq(wl->irq, wl);
 	kfree(wl->target_mem_map);
-	kfree(wl->fw);
+	vfree(wl->fw);
 	wl->fw = NULL;
 	kfree(wl->nvs);
 	wl->nvs = NULL;
 
-	kfree(wl->rx_descriptor);
-	wl->rx_descriptor = NULL;
-
 	kfree(wl->fw_status);
 	kfree(wl->tx_res_if);
 
@@ -1391,3 +1980,5 @@ module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 1dc74b0c7736..507cd91d7eed 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -27,25 +27,38 @@
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
+void wl1271_elp_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wl1271 *wl;
+
+	dwork = container_of(work, struct delayed_work, work);
+	wl = container_of(dwork, struct wl1271, elp_work);
+
+	wl1271_debug(DEBUG_PSM, "elp work");
+
+	mutex_lock(&wl->mutex);
+
+	if (wl->elp || !wl->psm)
+		goto out;
+
+	wl1271_debug(DEBUG_PSM, "chip to elp");
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+	wl->elp = true;
+
+out:
+	mutex_unlock(&wl->mutex);
+}
+
+#define ELP_ENTRY_DELAY  5
+
 /* Routines to toggle sleep mode while in ELP */
 void wl1271_ps_elp_sleep(struct wl1271 *wl)
 {
-	/*
-	 * FIXME: due to a problem in the firmware (causing a firmware
-	 * crash), ELP entry is prevented below. Remove the "true" to
-	 * re-enable ELP entry.
-	 */
-	if (true || wl->elp || !wl->psm)
-		return;
-
-	/*
-	 * Go to ELP unless there is work already pending - pending work
-	 * will immediately wakeup the chipset anyway.
-	 */
-	if (!work_pending(&wl->irq_work) && !work_pending(&wl->tx_work)) {
-		wl1271_debug(DEBUG_PSM, "chip to elp");
-		wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
-		wl->elp = true;
+	if (wl->psm) {
+		cancel_delayed_work(&wl->elp_work);
+		ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
+					msecs_to_jiffies(ELP_ENTRY_DELAY));
 	}
 }
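Because ELP entry is now deferred through the delayed work above, callers simply bracket chip access with wakeup/sleep and let the work item do the actual power-down. A sketch of that calling convention, inferred from the out_sleep pattern used elsewhere in this driver (illustrative, not part of the patch):

static int example_cmd_path(struct wl1271 *wl)
{
	int ret;

	mutex_lock(&wl->mutex);

	ret = wl1271_ps_elp_wakeup(wl, false);
	if (ret < 0)
		goto out;

	/* ... chip access via wl1271_spi_read()/wl1271_spi_write() ... */

	/* re-arms ELP entry after ELP_ENTRY_DELAY ms, if PSM is active */
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}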
 
@@ -73,7 +86,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
 		wl->elp_compl = &compl;
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-	wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
 
 	if (!pending) {
 		ret = wait_for_completion_timeout(
@@ -111,6 +124,17 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
 	switch (mode) {
 	case STATION_POWER_SAVE_MODE:
 		wl1271_debug(DEBUG_PSM, "entering psm");
+
+		/* enable beacon filtering */
+		ret = wl1271_acx_beacon_filter_opt(wl, true);
+		if (ret < 0)
+			return ret;
+
+		/* enable beacon early termination */
+		ret = wl1271_acx_bet_enable(wl, true);
+		if (ret < 0)
+			return ret;
+
 		ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
 		if (ret < 0)
 			return ret;
@@ -128,6 +152,16 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
 		if (ret < 0)
 			return ret;
 
+		/* disable beacon early termination */
+		ret = wl1271_acx_bet_enable(wl, false);
+		if (ret < 0)
+			return ret;
+
+		/* disable beacon filtering */
+		ret = wl1271_acx_beacon_filter_opt(wl, false);
+		if (ret < 0)
+			return ret;
+
 		ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
 		if (ret < 0)
 			return ret;
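For quick reference, the ordering the two hunks above establish (a summary, not text from the patch):

/*
 * PSM entry: enable beacon filtering -> enable beacon early termination
 *            -> send STATION_POWER_SAVE_MODE to the firmware
 * PSM exit:  disable beacon early termination -> disable beacon filtering
 *            -> send STATION_ACTIVE_MODE to the firmware
 */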
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index de2bd3c7dc9c..779653d0ae85 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -30,6 +30,6 @@
 int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
 int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
-
+void wl1271_elp_work(struct work_struct *work);
 
 #endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index f8ed4a4fc691..1f237389d1c7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -34,7 +34,7 @@
 #define REGISTERS_WORK_SIZE 0x0000b000
 
 #define HW_ACCESS_ELP_CTRL_REG_ADDR         0x1FFFC
-#define STATUS_MEM_ADDRESS                  0x40400
+#define FW_STATUS_ADDR                      (0x14FC0 + 0xA000)
 
 /* ELP register commands */
 #define ELPCTRL_WAKE_UP             0x1
@@ -213,7 +213,6 @@
 ==============================================*/
 #define ACX_REG_INTERRUPT_ACK          (REGISTERS_BASE + 0x04F0)
 
-#define RX_DRIVER_DUMMY_WRITE_ADDRESS  (REGISTERS_BASE + 0x0534)
 #define RX_DRIVER_COUNTER_ADDRESS      (REGISTERS_BASE + 0x0538)
 
 /* Device Configuration registers*/
@@ -614,50 +613,6 @@ enum {
 	MAX_RADIO_BANDS = 0xFF
 };
 
-enum {
-	NO_RATE      = 0,
-	RATE_1MBPS   = 0x0A,
-	RATE_2MBPS   = 0x14,
-	RATE_5_5MBPS = 0x37,
-	RATE_6MBPS   = 0x0B,
-	RATE_9MBPS   = 0x0F,
-	RATE_11MBPS  = 0x6E,
-	RATE_12MBPS  = 0x0A,
-	RATE_18MBPS  = 0x0E,
-	RATE_22MBPS  = 0xDC,
-	RATE_24MBPS  = 0x09,
-	RATE_36MBPS  = 0x0D,
-	RATE_48MBPS  = 0x08,
-	RATE_54MBPS  = 0x0C
-};
-
-enum {
-	RATE_INDEX_1MBPS   =  0,
-	RATE_INDEX_2MBPS   =  1,
-	RATE_INDEX_5_5MBPS =  2,
-	RATE_INDEX_6MBPS   =  3,
-	RATE_INDEX_9MBPS   =  4,
-	RATE_INDEX_11MBPS  =  5,
-	RATE_INDEX_12MBPS  =  6,
-	RATE_INDEX_18MBPS  =  7,
-	RATE_INDEX_22MBPS  =  8,
-	RATE_INDEX_24MBPS  =  9,
-	RATE_INDEX_36MBPS  =  10,
-	RATE_INDEX_48MBPS  =  11,
-	RATE_INDEX_54MBPS  =  12,
-	RATE_INDEX_MAX     =  RATE_INDEX_54MBPS,
-	MAX_RATE_INDEX,
-	INVALID_RATE_INDEX = MAX_RATE_INDEX,
-	RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF
-};
-
-enum {
-	RATE_MASK_1MBPS = 0x1,
-	RATE_MASK_2MBPS = 0x2,
-	RATE_MASK_5_5MBPS = 0x4,
-	RATE_MASK_11MBPS = 0x20,
-};
-
 #define SHORT_PREAMBLE_BIT   BIT(0) /* CCK or Barker depending on the rate */
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ad8b6904c5eb..ca645f38109b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -30,14 +30,15 @@
 static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
 				  u32 drv_rx_counter)
 {
-	return status->rx_pkt_descs[drv_rx_counter] & RX_MEM_BLOCK_MASK;
+	return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
+		RX_MEM_BLOCK_MASK;
 }
 
 static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
 				 u32 drv_rx_counter)
 {
-	return (status->rx_pkt_descs[drv_rx_counter] & RX_BUF_SIZE_MASK) >>
-		RX_BUF_SIZE_SHIFT_DIV;
+	return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
+		RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
 }
 
 /* The values of this table must match the wl1271_rates[] array */
@@ -70,6 +71,36 @@ static u8 wl1271_rx_rate_to_idx[] = {
 	0                           /* WL1271_RATE_1    */
 };
 
+/* The values of this table must match the wl1271_rates_5ghz[] array */
+static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
+	/* MCS rates are used only with 11n */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
+
+	7,                          /* WL1271_RATE_54   */
+	6,                          /* WL1271_RATE_48   */
+	5,                          /* WL1271_RATE_36   */
+	4,                          /* WL1271_RATE_24   */
+
+	/* TI-specific rate */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22   */
+
+	3,                          /* WL1271_RATE_18   */
+	2,                          /* WL1271_RATE_12   */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11   */
+	1,                          /* WL1271_RATE_9    */
+	0,                          /* WL1271_RATE_6    */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5  */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2    */
+	WL1271_RX_RATE_UNSUPPORTED  /* WL1271_RATE_1    */
+};
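Reading this table together with wl1271_rates_5ghz[] (defined earlier in this diff), for example (a worked illustration, not patch text):

/*
 * A frame received at WL1271_RATE_36 maps to index 5 here, and
 * wl1271_rates_5ghz[5] is the 36 Mbit/s entry, which is what mac80211
 * expects rate_idx to mean for the 5 GHz band.
 */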
+
 static void wl1271_rx_status(struct wl1271 *wl,
 			     struct wl1271_rx_descriptor *desc,
 			     struct ieee80211_rx_status *status,
@@ -77,12 +108,21 @@ static void wl1271_rx_status(struct wl1271 *wl,
 {
 	memset(status, 0, sizeof(struct ieee80211_rx_status));
 
-	if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
+	if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
+	    WL1271_RX_DESC_BAND_BG) {
 		status->band = IEEE80211_BAND_2GHZ;
-	else
+		status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
+	} else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
+		 WL1271_RX_DESC_BAND_A) {
+		status->band = IEEE80211_BAND_5GHZ;
+		status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
+	} else
 		wl1271_warning("unsupported band 0x%x",
 			       desc->flags & WL1271_RX_DESC_BAND_MASK);
 
+	if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
+		wl1271_warning("unsupported rate");
+
 	/*
 	 * FIXME: Add mactime handling.  For IBSS (ad-hoc) we need to get the
 	 * timestamp from the beacon (acx_tsf_info).  In BSS mode (infra) we
@@ -91,12 +131,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
 	 */
 	status->signal = desc->rssi;
 
-	/* FIXME: Should this be optimized? */
-	status->qual = (desc->rssi - WL1271_RX_MIN_RSSI) * 100 /
-		(WL1271_RX_MAX_RSSI - WL1271_RX_MIN_RSSI);
-	status->qual = min(status->qual, 100);
-	status->qual = max(status->qual, 0);
-
 	/*
 	 * FIXME: In wl1251, the SNR should be divided by two.  In wl1271 we
 	 * need to divide by two for now, but TI has been discussing about
@@ -109,17 +143,11 @@ static void wl1271_rx_status(struct wl1271 *wl,
 	if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
 		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
 
-		if (likely(!(desc->flags & WL1271_RX_DESC_DECRYPT_FAIL)))
+		if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL)))
 			status->flag |= RX_FLAG_DECRYPTED;
-
-		if (unlikely(desc->flags & WL1271_RX_DESC_MIC_FAIL))
+		if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
 			status->flag |= RX_FLAG_MMIC_ERROR;
 	}
-
-	status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
-
-	if (status->rate_idx == WL1271_RX_RATE_UNSUPPORTED)
-		wl1271_warning("unsupported rate");
 }
 
 static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
@@ -131,14 +159,14 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
 	u8 *buf;
 	u8 beacon = 0;
 
-	skb = dev_alloc_skb(length);
+	skb = __dev_alloc_skb(length, GFP_KERNEL);
 	if (!skb) {
 		wl1271_error("Couldn't allocate RX frame");
 		return;
 	}
 
 	buf = skb_put(skb, length);
-	wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
+	wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
 
 	/* the data read starts with the descriptor */
 	desc = (struct wl1271_rx_descriptor *) buf;
@@ -156,7 +184,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
 		     beacon ? "beacon" : "");
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
-	ieee80211_rx(wl->hw, skb);
+	ieee80211_rx_ni(wl->hw, skb);
 }
 
 void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
@@ -176,15 +204,15 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
 			break;
 		}
 
-		wl->rx_mem_pool_addr.addr =
-			(mem_block << 8) + wl_mem_map->packet_memory_pool_start;
+		wl->rx_mem_pool_addr.addr = (mem_block << 8) +
+			le32_to_cpu(wl_mem_map->packet_memory_pool_start);
 		wl->rx_mem_pool_addr.addr_extra =
 			wl->rx_mem_pool_addr.addr + 4;
 
 		/* Choose the block we want to read */
-		wl1271_spi_reg_write(wl, WL1271_SLV_REG_DATA,
-				     &wl->rx_mem_pool_addr,
-				     sizeof(wl->rx_mem_pool_addr), false);
+		wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
+				 &wl->rx_mem_pool_addr,
+				 sizeof(wl->rx_mem_pool_addr), false);
 
 		wl1271_rx_handle_data(wl, buf_size);
 
@@ -192,9 +220,5 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
 		drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
 	}
 
-	wl1271_reg_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
-
-	/* This is a workaround for some problems in the chip */
-	wl1271_reg_write32(wl, RX_DRIVER_DUMMY_WRITE_ADDRESS, 0x1);
-
+	wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
 }
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index d1ca60e43a25..1ae6d1783ed4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -102,14 +102,14 @@
 #define RX_BUF_SIZE_SHIFT_DIV 6
 
 struct wl1271_rx_descriptor {
-	u16 length;
+	__le16 length;
 	u8  status;
 	u8  flags;
 	u8  rate;
 	u8  channel;
 	s8  rssi;
 	u8  snr;
-	u32 timestamp;
+	__le32 timestamp;
 	u8  packet_class;
 	u8  process_id;
 	u8  pad_len;
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4a12880c16a8..02978a16e732 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,17 +30,29 @@
 #include "wl12xx_80211.h"
 #include "wl1271_spi.h"
 
-static int wl1271_translate_reg_addr(struct wl1271 *wl, int addr)
+static int wl1271_translate_addr(struct wl1271 *wl, int addr)
 {
-	return addr - wl->physical_reg_addr + wl->virtual_reg_addr;
-}
-
-static int wl1271_translate_mem_addr(struct wl1271 *wl, int addr)
-{
-	return addr - wl->physical_mem_addr + wl->virtual_mem_addr;
+	/*
+	 * To translate, first check to which window of addresses the
+	 * particular address belongs. Then subtract the starting address
+	 * of that window from the address. Then add the offset of the
+	 * translated region.
+	 *
+	 * The translated regions occur next to each other in physical device
+	 * memory, so just add the sizes of the preceding address regions to
+	 * get the offset to the new region.
+	 *
+	 * Currently, only the first two regions are addressed, and the
+	 * assumption is that all addresses will fall into either of those
+	 * two.
+	 */
+	if ((addr >= wl->part.reg.start) &&
+	    (addr < wl->part.reg.start + wl->part.reg.size))
+		return addr - wl->part.reg.start + wl->part.mem.size;
+	else
+		return addr - wl->part.mem.start;
 }
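A worked example of the translation, with hypothetical partition values (not taken from the patch):

/*
 * Suppose part.mem = { .start = 0x00000000, .size = 0x00017000 } and
 * part.reg = { .start = 0x00300000, .size = 0x00009000 }.  Then:
 *
 *   translate(0x00000100) = 0x00000100                        (memory window)
 *   translate(0x00300004) = 0x00300004 - 0x00300000 + 0x00017000
 *                         = 0x00017004                        (register window)
 */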
 
-
 void wl1271_spi_reset(struct wl1271 *wl)
 {
 	u8 *cmd;
@@ -123,133 +135,137 @@ void wl1271_spi_init(struct wl1271 *wl)
 
 /* Set the SPI partitions to access the chip addresses
  *
- * There are two VIRTUAL (SPI) partitions (the memory partition and the
- * registers partition), which are mapped to two different areas of the
- * PHYSICAL (hardware) memory.  This function also makes other checks to
- * ensure that the partitions are not overlapping.  In the diagram below, the
- * memory partition comes before the register partition, but the opposite is
- * also supported.
+ * To simplify driver code, a fixed (virtual) memory map is defined for
+ * register and memory addresses. Because these addresses move around in
+ * different stages of the chipset's operation, an address translation
+ * mechanism is required.
  *
- *                               PHYSICAL address
+ * There are four partitions (three memory and one register partition),
+ * which are mapped to two different areas of the hardware memory.
+ *
+ *                                Virtual address
  *                                     space
  *
  *                                    |    |
- *                                 ...+----+--> mem_start
- *          VIRTUAL address     ...   |    |
+ *                                 ...+----+--> mem.start
+ *          Physical address    ...   |    |
  *               space       ...      |    | [PART_0]
  *                        ...         |    |
- * 0x00000000 <--+----+...         ...+----+--> mem_start + mem_size
+ *  00000000  <--+----+...         ...+----+--> mem.start + mem.size
  *               |    |         ...   |    |
  *               |MEM |      ...      |    |
  *               |    |   ...         |    |
- *  part_size <--+----+...            |    | {unused area)
+ *  mem.size  <--+----+...            |    | (unused area)
  *               |    |   ...         |    |
  *               |REG |      ...      |    |
- *  part_size    |    |         ...   |    |
- *      +     <--+----+...         ...+----+--> reg_start
- *  reg_size              ...         |    |
- *                           ...      |    | [PART_1]
- *                              ...   |    |
- *                                 ...+----+--> reg_start + reg_size
+ *  mem.size     |    |         ...   |    |
+ *      +     <--+----+...         ...+----+--> reg.start
+ *  reg.size     |    |   ...         |    |
+ *               |MEM2|      ...      |    | [PART_1]
+ *               |    |         ...   |    |
+ *                                 ...+----+--> reg.start + reg.size
  *                                    |    |
  *
  */
 int wl1271_set_partition(struct wl1271 *wl,
-			  u32 mem_start, u32 mem_size,
-			  u32 reg_start, u32 reg_size)
+			 struct wl1271_partition_set *p)
 {
-	struct wl1271_partition *partition;
-	struct spi_transfer t;
-	struct spi_message m;
-	size_t len, cmd_len;
-	u32 *cmd;
-	int addr;
-
-	cmd_len = sizeof(u32) + 2 * sizeof(struct wl1271_partition);
-	cmd = kzalloc(cmd_len, GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	spi_message_init(&m);
-	memset(&t, 0, sizeof(t));
-
-	partition = (struct wl1271_partition *) (cmd + 1);
-	addr = HW_ACCESS_PART0_SIZE_ADDR;
-	len = 2 * sizeof(struct wl1271_partition);
-
-	*cmd |= WSPI_CMD_WRITE;
-	*cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
-	*cmd |= addr & WSPI_CMD_BYTE_ADDR;
+	/* copy partition info */
+	memcpy(&wl->part, p, sizeof(*p));
 
 	wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-		     mem_start, mem_size);
+		     p->mem.start, p->mem.size);
 	wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-		     reg_start, reg_size);
-
-	/* Make sure that the two partitions together don't exceed the
-	 * address range */
-	if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) {
-		wl1271_debug(DEBUG_SPI, "Total size exceeds maximum virtual"
-			     " address range.  Truncating partition[0].");
-		mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size;
-		wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-			     mem_start, mem_size);
-		wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-			     reg_start, reg_size);
-	}
+		     p->reg.start, p->reg.size);
+	wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
+		     p->mem2.start, p->mem2.size);
+	wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
+		     p->mem3.start, p->mem3.size);
+
+	/* write partition info to the chipset */
+	wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+	wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+	wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+	wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+	wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+	wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+	wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
 
-	if ((mem_start < reg_start) &&
-	    ((mem_start + mem_size) > reg_start)) {
-		/* Guarantee that the memory partition doesn't overlap the
-		 * registers partition */
-		wl1271_debug(DEBUG_SPI, "End of partition[0] is "
-			     "overlapping partition[1].  Adjusted.");
-		mem_size = reg_start - mem_start;
-		wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-			     mem_start, mem_size);
-		wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-			     reg_start, reg_size);
-	} else if ((reg_start < mem_start) &&
-		   ((reg_start + reg_size) > mem_start)) {
-		/* Guarantee that the register partition doesn't overlap the
-		 * memory partition */
-		wl1271_debug(DEBUG_SPI, "End of partition[1] is"
-			     " overlapping partition[0].  Adjusted.");
-		reg_size = mem_start - reg_start;
-		wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-			     mem_start, mem_size);
-		wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-			     reg_start, reg_size);
-	}
+	return 0;
+}
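A hedged sketch of how a caller programs a partition with the new interface; the numeric values are placeholders, not the driver's real partition tables:

static int example_select_partition(struct wl1271 *wl)
{
	/* placeholder values; the real tables live elsewhere in the driver */
	struct wl1271_partition_set part = {
		.mem = { .start = 0x00000000, .size = 0x00017000 },
		.reg = { .start = 0x00300000, .size = 0x00009000 },
	};

	/* copies the set into wl->part and writes it to the chip */
	return wl1271_set_partition(wl, &part);
}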
 
-	partition[0].start = mem_start;
-	partition[0].size  = mem_size;
-	partition[1].start = reg_start;
-	partition[1].size  = reg_size;
+#define WL1271_BUSY_WORD_TIMEOUT 1000
 
-	wl->physical_mem_addr = mem_start;
-	wl->physical_reg_addr = reg_start;
+/* FIXME: Check busy words, removed due to SPI bug */
+#if 0
+static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
+{
+	struct spi_transfer t[1];
+	struct spi_message m;
+	u32 *busy_buf;
+	int num_busy_bytes = 0;
 
-	wl->virtual_mem_addr = 0;
-	wl->virtual_reg_addr = mem_size;
+	wl1271_info("spi read BUSY!");
 
-	t.tx_buf = cmd;
-	t.len = cmd_len;
-	spi_message_add_tail(&t, &m);
+	/*
+	 * Look for the non-busy word in the read buffer, and if found,
+	 * read the remaining data into the buffer.
+	 */
+	busy_buf = (u32 *)buf;
+	for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
+		num_busy_bytes += sizeof(u32);
+		if (*busy_buf & 0x1) {
+			spi_message_init(&m);
+			memset(t, 0, sizeof(t));
+			memmove(buf, busy_buf, len - num_busy_bytes);
+			t[0].rx_buf = buf + (len - num_busy_bytes);
+			t[0].len = num_busy_bytes;
+			spi_message_add_tail(&t[0], &m);
+			spi_sync(wl->spi, &m);
+			return;
+		}
+	}
 
-	spi_sync(wl->spi, &m);
+	/*
+	 * Read further busy words from SPI until a non-busy word is
+	 * encountered, then read the data itself into the buffer.
+	 */
+	wl1271_info("spi read BUSY-polling needed!");
 
-	kfree(cmd);
+	num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
+	busy_buf = wl->buffer_busyword;
+	while (num_busy_bytes) {
+		num_busy_bytes--;
+		spi_message_init(&m);
+		memset(t, 0, sizeof(t));
+		t[0].rx_buf = busy_buf;
+		t[0].len = sizeof(u32);
+		spi_message_add_tail(&t[0], &m);
+		spi_sync(wl->spi, &m);
+
+		if (*busy_buf & 0x1) {
+			spi_message_init(&m);
+			memset(t, 0, sizeof(t));
+			t[0].rx_buf = buf;
+			t[0].len = len;
+			spi_message_add_tail(&t[0], &m);
+			spi_sync(wl->spi, &m);
+			return;
+		}
+	}
 
-	return 0;
+	/* The SPI bus is unresponsive, the read failed. */
+	memset(buf, 0, len);
+	wl1271_error("SPI read busy-word timeout!\n");
 }
+#endif
 
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
-		     size_t len, bool fixed)
+void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+			 size_t len, bool fixed)
 {
 	struct spi_transfer t[3];
 	struct spi_message m;
-	u8 *busy_buf;
+	u32 *busy_buf;
 	u32 *cmd;
 
 	cmd = &wl->buffer_cmd;
@@ -281,14 +297,16 @@ void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
 
 	spi_sync(wl->spi, &m);
 
-	/* FIXME: check busy words */
+	/* FIXME: Check busy words, removed due to SPI bug */
+	/* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
+	   wl1271_spi_read_busy(wl, buf, len); */
 
 	wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
 	wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
 }
 
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
-		      size_t len, bool fixed)
+void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
+			  size_t len, bool fixed)
 {
 	struct spi_transfer t[2];
 	struct spi_message m;
@@ -321,62 +339,77 @@ void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
 	wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
 }
 
-void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf,
-			 size_t len)
+void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+		     bool fixed)
 {
 	int physical;
 
-	physical = wl1271_translate_mem_addr(wl, addr);
+	physical = wl1271_translate_addr(wl, addr);
 
-	wl1271_spi_read(wl, physical, buf, len, false);
+	wl1271_spi_raw_read(wl, physical, buf, len, fixed);
 }
 
-void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf,
-			  size_t len)
+void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+		      bool fixed)
 {
 	int physical;
 
-	physical = wl1271_translate_mem_addr(wl, addr);
+	physical = wl1271_translate_addr(wl, addr);
 
-	wl1271_spi_write(wl, physical, buf, len, false);
+	wl1271_spi_raw_write(wl, physical, buf, len, fixed);
 }
 
-void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len,
-			 bool fixed)
+u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
 {
-	int physical;
-
-	physical = wl1271_translate_reg_addr(wl, addr);
+	return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+}
 
-	wl1271_spi_read(wl, physical, buf, len, fixed);
+void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
+{
+	wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
 }
 
-void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
-			  bool fixed)
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
 {
-	int physical;
+	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
+	addr = (addr >> 1) + 0x30000;
+	wl1271_spi_write32(wl, OCP_POR_CTR, addr);
 
-	physical = wl1271_translate_reg_addr(wl, addr);
+	/* write value to OCP_POR_WDATA */
+	wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
 
-	wl1271_spi_write(wl, physical, buf, len, fixed);
+	/* write 1 to OCP_CMD */
+	wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
 }
 
-u32 wl1271_mem_read32(struct wl1271 *wl, int addr)
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
 {
-	return wl1271_read32(wl, wl1271_translate_mem_addr(wl, addr));
-}
+	u32 val;
+	int timeout = OCP_CMD_LOOP;
 
-void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val)
-{
-	wl1271_write32(wl, wl1271_translate_mem_addr(wl, addr), val);
-}
+	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
+	addr = (addr >> 1) + 0x30000;
+	wl1271_spi_write32(wl, OCP_POR_CTR, addr);
 
-u32 wl1271_reg_read32(struct wl1271 *wl, int addr)
-{
-	return wl1271_read32(wl, wl1271_translate_reg_addr(wl, addr));
-}
+	/* write 2 to OCP_CMD */
+	wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
 
-void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val)
-{
-	wl1271_write32(wl, wl1271_translate_reg_addr(wl, addr), val);
+	/* poll for data ready */
+	do {
+		val = wl1271_spi_read32(wl, OCP_DATA_READ);
+		timeout--;
+	} while (!(val & OCP_READY_MASK) && timeout);
+
+	if (!timeout) {
+		wl1271_warning("Top register access timed out.");
+		return 0xffff;
+	}
+
+	/* check data status and return if OK */
+	if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
+		return val & 0xffff;
+	else {
+		wl1271_warning("Top register access returned error.");
+		return 0xffff;
+	}
 }
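The top-register handshake above boils down to a fixed sequence; summarized here as a reading aid (note that 0xffff doubles as the error return):

/*
 * write: (addr >> 1) + 0x30000 -> OCP_POR_CTR, val -> OCP_DATA_WRITE,
 *        OCP_CMD_WRITE -> OCP_CMD
 * read:  (addr >> 1) + 0x30000 -> OCP_POR_CTR, OCP_CMD_READ -> OCP_CMD,
 *        then poll OCP_DATA_READ until OCP_READY_MASK is set (at most
 *        OCP_CMD_LOOP tries); 0xffff is returned on timeout or on a bad
 *        OCP_STATUS.
 */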
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index 2c9968458646..cb7df1c56314 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -29,10 +29,14 @@
 
 #define HW_ACCESS_MEMORY_MAX_RANGE		0x1FFC0
 
-#define HW_ACCESS_PART0_SIZE_ADDR           0x1FFC0
-#define HW_ACCESS_PART0_START_ADDR          0x1FFC4
-#define HW_ACCESS_PART1_SIZE_ADDR           0x1FFC8
-#define HW_ACCESS_PART1_START_ADDR          0x1FFCC
+#define HW_PARTITION_REGISTERS_ADDR         0x1ffc0
+#define HW_PART0_SIZE_ADDR                  (HW_PARTITION_REGISTERS_ADDR)
+#define HW_PART0_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 4)
+#define HW_PART1_SIZE_ADDR                  (HW_PARTITION_REGISTERS_ADDR + 8)
+#define HW_PART1_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 12)
+#define HW_PART2_SIZE_ADDR                  (HW_PARTITION_REGISTERS_ADDR + 16)
+#define HW_PART2_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 20)
+#define HW_PART3_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 24)
 
 #define HW_ACCESS_REGISTER_SIZE             4
 
@@ -67,47 +71,56 @@
 		((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
 #define HW_ACCESS_WSPI_INIT_CMD_MASK  0
 
+#define OCP_CMD_LOOP  32
+
+#define OCP_CMD_WRITE 0x1
+#define OCP_CMD_READ  0x2
+
+#define OCP_READY_MASK  BIT(18)
+#define OCP_STATUS_MASK (BIT(16) | BIT(17))
+
+#define OCP_STATUS_NO_RESP    0x00000
+#define OCP_STATUS_OK         0x10000
+#define OCP_STATUS_REQ_FAILED 0x20000
+#define OCP_STATUS_RESP_ERROR 0x30000
 
 /* Raw target IO, address is not translated */
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
+void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
 		      size_t len, bool fixed);
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
+void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
 		     size_t len, bool fixed);
 
-/* Memory target IO, address is tranlated to partition 0 */
-void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, size_t len);
-void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, size_t len);
-u32 wl1271_mem_read32(struct wl1271 *wl, int addr);
-void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val);
+/* Translated target IO */
+void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+		     bool fixed);
+void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+		      bool fixed);
+u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
+void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
 
-/* Registers IO */
-void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len,
-			 bool fixed);
-void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
-			  bool fixed);
-u32 wl1271_reg_read32(struct wl1271 *wl, int addr);
-void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val);
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
 
 /* INIT and RESET words */
 void wl1271_spi_reset(struct wl1271 *wl);
 void wl1271_spi_init(struct wl1271 *wl);
 int wl1271_set_partition(struct wl1271 *wl,
-			 u32 part_start, u32 part_size,
-			 u32 reg_start,  u32 reg_size);
+			 struct wl1271_partition_set *p);
 
-static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
 {
-	wl1271_spi_read(wl, addr, &wl->buffer_32,
-			sizeof(wl->buffer_32), false);
+	wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
+			    sizeof(wl->buffer_32), false);
 
 	return wl->buffer_32;
 }
 
-static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
 {
 	wl->buffer_32 = val;
-	wl1271_spi_write(wl, addr, &wl->buffer_32,
-			 sizeof(wl->buffer_32), false);
+	wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
+			     sizeof(wl->buffer_32), false);
 }
 
 #endif /* __WL1271_SPI_H__ */
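After this rename the SPI accessors form three distinct layers; a summary of the split (a reading aid, not part of the patch):

/*
 * wl1271_spi_raw_read()/_raw_write(), wl1271_raw_read32()/_raw_write32()
 *     - bus addresses used as-is, no translation
 * wl1271_spi_read()/_write(), wl1271_spi_read32()/_write32()
 *     - addresses translated through the current wl1271_partition_set
 * wl1271_top_reg_read()/_write()
 *     - indirect "top" register access via the OCP_* mechanism
 */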
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index ff221258b941..00af065c77c2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -33,8 +33,7 @@
 static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
 {
 	int i;
-
-	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		if (wl->tx_frames[i] == NULL) {
 			wl->tx_frames[i] = skb;
 			return i;
@@ -58,8 +57,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
 	/* approximate the number of blocks required for this packet
 	   in the firmware */
 	/* FIXME: try to figure out what is done here and make it cleaner */
-	total_blocks = (skb->len) >> TX_HW_BLOCK_SHIFT_DIV;
-	excluded = (total_blocks << 2) + (skb->len & 0xff) + 34;
+	total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
+	excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
 	total_blocks += (excluded > 252) ? 2 : 1;
 	total_blocks += TX_HW_BLOCK_SPARE;
 
@@ -89,15 +88,25 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 {
 	struct wl1271_tx_hw_descr *desc;
 	int pad;
+	u16 tx_attr;
 
 	desc = (struct wl1271_tx_hw_descr *) skb->data;
 
+	/* relocate space for security header */
+	if (extra) {
+		void *framestart = skb->data + sizeof(*desc);
+		u16 fc = *(u16 *)(framestart + extra);
+		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
+		memmove(framestart, framestart + extra, hdrlen);
+	}
+
 	/* configure packet life time */
-	desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset;
-	desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU;
+	desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
+				       wl->time_offset);
+	desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
 
 	/* configure the tx attributes */
-	desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+	tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
 	/* FIXME: do we know the packet priority? can we identify mgmt
 	   packets, and use max prio for them at least? */
 	desc->tid = 0;
@@ -106,11 +115,13 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 
 	/* align the length (and store in terms of words) */
 	pad = WL1271_TX_ALIGN(skb->len);
-	desc->length = pad >> 2;
+	desc->length = cpu_to_le16(pad >> 2);
 
 	/* calculate number of padding bytes */
 	pad = pad - skb->len;
-	desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+	tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+
+	desc->tx_attr = cpu_to_le16(tx_attr);
 
 	wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
 	return 0;
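The security-header relocation above is easier to follow as a picture; the skb layout it produces, derived from the memmove (with "extra" presumably being WL1271_TKIP_IV_SPACE when a TKIP key is in use):

/*
 *   before:  [ tx descriptor ][ extra ][ 802.11 header ][ payload ]
 *   after:   [ tx descriptor ][ 802.11 header ][ extra ][ payload ]
 *
 * i.e. the header is pulled down next to the descriptor, leaving the
 * security (IV) space between header and payload; the reverse move is
 * visible in the tx completion path further below.
 */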
@@ -147,11 +158,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
 	len = WL1271_TX_ALIGN(skb->len);
 
 	/* perform a fixed address block write with the packet */
-	wl1271_spi_reg_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
+	wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
 
 	/* write packet new counter into the write access register */
 	wl->tx_packets_count++;
-	wl1271_reg_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+	wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
 
 	desc = (struct wl1271_tx_hw_descr *) skb->data;
 	wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -254,14 +265,13 @@ out:
 static void wl1271_tx_complete_packet(struct wl1271 *wl,
 				      struct wl1271_tx_hw_res_descr *result)
 {
-
 	struct ieee80211_tx_info *info;
 	struct sk_buff *skb;
-	u32 header_len;
+	u16 seq;
 	int id = result->id;
 
 	/* check for id legality */
-	if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) {
+	if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
 		wl1271_warning("TX result illegal id: %d", id);
 		return;
 	}
@@ -284,22 +294,32 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 	/* info->status.retry_count = result->ack_failures; */
 	wl->stats.retry_count += result->ack_failures;
 
-	/* get header len */
+	/* update security sequence number */
+	seq = wl->tx_security_seq_16 +
+		(result->lsb_security_sequence_number -
+		 wl->tx_security_last_seq);
+	wl->tx_security_last_seq = result->lsb_security_sequence_number;
+
+	if (seq < wl->tx_security_seq_16)
+		wl->tx_security_seq_32++;
+	wl->tx_security_seq_16 = seq;
+
+	/* remove private header from packet */
+	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+
+	/* remove TKIP header space if present */
 	if (info->control.hw_key &&
-	    info->control.hw_key->alg == ALG_TKIP)
-		header_len = WL1271_TKIP_IV_SPACE +
-			sizeof(struct wl1271_tx_hw_descr);
-	else
-		header_len = sizeof(struct wl1271_tx_hw_descr);
+	    info->control.hw_key->alg == ALG_TKIP) {
+		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
+		skb_pull(skb, WL1271_TKIP_IV_SPACE);
+	}
 
 	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
 		     " status 0x%x",
 		     result->id, skb, result->ack_failures,
 		     result->rate_class_index, result->status);
 
-	/* remove private header from packet */
-	skb_pull(skb, header_len);
-
 	/* return the packet to the stack */
 	ieee80211_tx_status(wl->hw, skb);
 	wl->tx_frames[result->id] = NULL;
@@ -315,8 +335,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
 	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
 
 	/* read the tx results from the chipset */
-	wl1271_spi_mem_read(wl, memmap->tx_result,
-			    wl->tx_res_if, sizeof(*wl->tx_res_if));
+	wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
+			wl->tx_res_if, sizeof(*wl->tx_res_if), false);
 
 	/* verify that the result buffer is not getting overrun */
 	if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -337,10 +357,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
 	}
 
 	/* write host counter to chipset (to ack) */
-	wl1271_mem_write32(wl, memmap->tx_result +
+	wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
 			   offsetof(struct wl1271_tx_hw_res_if,
 				    tx_result_host_counter),
-			   wl->tx_res_if->tx_result_fw_counter);
+			   le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
 }
 
 /* caller must hold wl->mutex */
@@ -364,7 +384,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
 		ieee80211_tx_status(wl->hw, skb);
 	}
 
-	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		if (wl->tx_frames[i] != NULL) {
 			skb = wl->tx_frames[i];
 			info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 4a614067ddba..416396caf0a0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -58,7 +58,7 @@
 
 struct wl1271_tx_hw_descr {
 	/* Length of packet in words, including descriptor+header+data */
-	u16 length;
+	__le16 length;
 	/* Number of extra memory blocks to allocate for this packet in
 	   addition to the number of blocks derived from the packet length */
 	u8 extra_mem_blocks;
@@ -67,12 +67,12 @@ struct wl1271_tx_hw_descr {
 	   HW!! */
 	u8 total_mem_blocks;
 	/* Device time (in us) when the packet arrived to the driver */
-	u32 start_time;
+	__le32 start_time;
 	/* Max delay in TUs until transmission. The last device time the
 	   packet can be transmitted is: startTime+(1024*LifeTime) */
-	u16 life_time;
+	__le16 life_time;
 	/* Bitwise fields - see TX_ATTR... definitions above. */
-	u16 tx_attr;
+	__le16 tx_attr;
 	/* Packet identifier used also in the Tx-Result. */
 	u8 id;
 	/* The packet TID value (as User-Priority) */
@@ -100,12 +100,12 @@ struct wl1271_tx_hw_res_descr {
 	   several possible reasons for failure. */
 	u8 status;
 	/* Total air access duration including all retrys and overheads.*/
-	u16 medium_usage;
+	__le16 medium_usage;
 	/* The time passed from host xfer to Tx-complete.*/
-	u32 fw_handling_time;
+	__le32 fw_handling_time;
 	/* Total media delay
 	   (from 1st EDCA AIFS counter until TX Complete). */
-	u32 medium_delay;
+	__le32 medium_delay;
 	/* LS-byte of last TKIP seq-num (saved per AC for recovery). */
 	u8 lsb_security_sequence_number;
 	/* Retry count - number of transmissions without successful ACK.*/
@@ -118,8 +118,8 @@ struct wl1271_tx_hw_res_descr {
 } __attribute__ ((packed));
 
 struct wl1271_tx_hw_res_if {
-	u32 tx_result_fw_counter;
-	u32 tx_result_host_counter;
+	__le32 tx_result_fw_counter;
+	__le32 tx_result_host_counter;
 	struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
 } __attribute__ ((packed));
 
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 657c2dbcb7d3..055d7bc6f592 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -122,8 +122,8 @@ struct wl12xx_null_data_template {
 } __attribute__ ((packed));
 
 struct wl12xx_ps_poll_template {
-	u16 fc;
-	u16 aid;
+	__le16 fc;
+	__le16 aid;
 	u8 bssid[ETH_ALEN];
 	u8 ta[ETH_ALEN];
 } __attribute__ ((packed));
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 5f0401a52cff..7b9621de239f 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -365,7 +365,7 @@ static void wl3501_free_tx_buffer(struct wl3501_card *this, u16 ptr)
 
 static int wl3501_esbq_req_test(struct wl3501_card *this)
 {
-	u8 tmp;
+	u8 tmp = 0;
 
 	wl3501_get_from_wla(this, this->esbq_req_head + 3, &tmp, sizeof(tmp));
 	return tmp & 0x80;
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index bc81974a2bc7..33c8be7ec8e6 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -112,6 +112,9 @@ exit:
 	return err;
 }
 
+MODULE_FIRMWARE("zd1201-ap.fw");
+MODULE_FIRMWARE("zd1201.fw");
+
 static void zd1201_usbfree(struct urb *urb)
 {
 	struct zd1201 *zd = urb->context;
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
index 74b31eafe72d..5f809695f71a 100644
--- a/drivers/net/wireless/zd1211rw/Kconfig
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -1,6 +1,6 @@
 config ZD1211RW
 	tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
-	depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL
+	depends on USB && MAC80211 && EXPERIMENTAL
 	select FW_LOADER
 	---help---
 	  This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 4e79a9800134..dfa1b9bc22c8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -755,7 +755,7 @@ static int hw_reset_phy(struct zd_chip *chip)
 static int zd1211_hw_init_hmac(struct zd_chip *chip)
 {
 	static const struct zd_ioreq32 ioreqs[] = {
-		{ CR_ZD1211_RETRY_MAX,		0x2 },
+		{ CR_ZD1211_RETRY_MAX,		ZD1211_RETRY_COUNT },
 		{ CR_RX_THRESHOLD,		0x000c0640 },
 	};
 
@@ -767,7 +767,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
 static int zd1211b_hw_init_hmac(struct zd_chip *chip)
 {
 	static const struct zd_ioreq32 ioreqs[] = {
-		{ CR_ZD1211B_RETRY_MAX,		0x02020202 },
+		{ CR_ZD1211B_RETRY_MAX,		ZD1211B_RETRY_COUNT },
 		{ CR_ZD1211B_CWIN_MAX_MIN_AC0,	0x007f003f },
 		{ CR_ZD1211B_CWIN_MAX_MIN_AC1,	0x007f003f },
 		{ CR_ZD1211B_CWIN_MAX_MIN_AC2,  0x003f001f },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 678c139a840c..9fd8f3508d66 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -642,13 +642,29 @@ enum {
 #define CR_ZD1211B_TXOP			CTL_REG(0x0b20)
 #define CR_ZD1211B_RETRY_MAX		CTL_REG(0x0b28)
 
+/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. Vendor driver uses 2,
+ * we use 0. The first rate is tried (count+2) times, then each following
+ * rate is tried twice, down to 1 Mbit/s. */
+#define	ZD1211_RETRY_COUNT		0
+#define	ZD1211B_RETRY_COUNT	\
+	(ZD1211_RETRY_COUNT <<  0)|	\
+	(ZD1211_RETRY_COUNT <<  8)|	\
+	(ZD1211_RETRY_COUNT << 16)|	\
+	(ZD1211_RETRY_COUNT << 24)
+
 /* Used to detect PLL lock */
 #define UW2453_INTR_REG			((zd_addr_t)0x85c1)
 
 #define CWIN_SIZE			0x007f043f
 
 
-#define HWINT_ENABLED			0x004f0000
+#define HWINT_ENABLED			\
+	(INT_TX_COMPLETE_EN|		\
+	 INT_RX_COMPLETE_EN|		\
+	 INT_RETRY_FAIL_EN|		\
+	 INT_WAKEUP_EN|			\
+	 INT_CFG_NEXT_BCN_EN)
+
 #define HWINT_DISABLED			0
 
 #define E2P_PWR_INT_GUARD		8
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6d666359a42f..8a243732c519 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -88,6 +88,34 @@ static const struct ieee80211_rate zd_rates[] = {
 	  .flags = 0 },
 };
 
+/*
+ * Zydas retry rates table. Each entry is listed in the same order as
+ * in zd_rates[] and contains all the rates used when a packet is sent
+ * starting with a given rate. Consider an example:
+ *
+ * "11 Mbits: 4, 3, 2, 1, 0" means:
+ * - packet is sent using 4 different rates
+ * - 1st rate is index 3 (ie 11 Mbits)
+ * - 2nd rate is index 2 (ie 5.5 Mbits)
+ * - 3rd rate is index 1 (ie 2 Mbits)
+ * - 4th rate is index 0 (ie 1 Mbits)
+ */
+
+static const struct tx_retry_rate zd_retry_rates[] = {
+	{ /*  1 Mbits */	1, { 0 }},
+	{ /*  2 Mbits */	2, { 1,  0 }},
+	{ /*  5.5 Mbits */	3, { 2,  1, 0 }},
+	{ /* 11 Mbits */	4, { 3,  2, 1, 0 }},
+	{ /*  6 Mbits */	5, { 4,  3, 2, 1, 0 }},
+	{ /*  9 Mbits */	6, { 5,  4, 3, 2, 1, 0}},
+	{ /* 12 Mbits */	5, { 6,  3, 2, 1, 0 }},
+	{ /* 18 Mbits */	6, { 7,  6, 3, 2, 1, 0 }},
+	{ /* 24 Mbits */	6, { 8,  6, 3, 2, 1, 0 }},
+	{ /* 36 Mbits */	7, { 9,  8, 6, 3, 2, 1, 0 }},
+	{ /* 48 Mbits */	8, {10,  9, 8, 6, 3, 2, 1, 0 }},
+	{ /* 54 Mbits */	9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
+};
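A worked example of how this table is consumed (it matches the lookup done in zd_mac_tx_status() below):

/*
 * A frame whose first attempt used 24 Mbit/s has first_idx == 8, so
 * zd_retry_rates[8] == { 6, { 8, 6, 3, 2, 1, 0 } }.  If the device then
 * reports 3 transmissions, the rates actually tried were indices 8, 6
 * and 3, i.e. 24, 12 and 11 Mbit/s, and index 3 (11 Mbit/s) is the final
 * rate compared against tx_status->rate.
 */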
+
 static const struct ieee80211_channel zd_channels[] = {
 	{ .center_freq = 2412, .hw_value = 1 },
 	{ .center_freq = 2417, .hw_value = 2 },
@@ -282,7 +310,7 @@ static void zd_op_stop(struct ieee80211_hw *hw)
 }
 
 /**
- * tx_status - reports tx status of a packet if required
+ * zd_mac_tx_status - reports tx status of a packet if required
  * @hw - a &struct ieee80211_hw pointer
  * @skb - a sk-buffer
  * @flags: extra flags to set in the TX status info
@@ -295,15 +323,49 @@ static void zd_op_stop(struct ieee80211_hw *hw)
  *
  * If no status information has been requested, the skb is freed.
  */
-static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
-		      int ackssi, bool success)
+static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
+		      int ackssi, struct tx_status *tx_status)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int i;
+	int success = 1, retry = 1;
+	int first_idx;
+	const struct tx_retry_rate *retries;
 
 	ieee80211_tx_info_clear_status(info);
 
-	if (success)
+	if (tx_status) {
+		success = !tx_status->failure;
+		retry = tx_status->retry + success;
+	}
+
+	if (success) {
+		/* success */
 		info->flags |= IEEE80211_TX_STAT_ACK;
+	} else {
+		/* failure */
+		info->flags &= ~IEEE80211_TX_STAT_ACK;
+	}
+
+	first_idx = info->status.rates[0].idx;
+	ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
+	retries = &zd_retry_rates[first_idx];
+	ZD_ASSERT(0<=retry && retry<=retries->count);
+
+	info->status.rates[0].idx = retries->rate[0];
+	info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);
+
+	for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
+		info->status.rates[i].idx = retries->rate[i];
+		info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
+	}
+	for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
+		info->status.rates[i].idx = retries->rate[retry-1];
+		info->status.rates[i].count = 1; // (success ? 1:2);
+	}
+	if (i<IEEE80211_TX_MAX_RATES)
+		info->status.rates[i].idx = -1; /* terminate */
+
 	info->status.ack_signal = ackssi;
 	ieee80211_tx_status_irqsafe(hw, skb);
 }
@@ -316,16 +378,79 @@ static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
  * transferred. The first frame from the tx queue, will be selected and
  * reported as error to the upper layers.
  */
-void zd_mac_tx_failed(struct ieee80211_hw *hw)
+void zd_mac_tx_failed(struct urb *urb)
 {
-	struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue;
+	struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
+	struct zd_mac *mac = zd_hw_mac(hw);
+	struct sk_buff_head *q = &mac->ack_wait_queue;
 	struct sk_buff *skb;
+	struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
+	unsigned long flags;
+	int success = !tx_status->failure;
+	int retry = tx_status->retry + success;
+	int found = 0;
+	int i, position = 0;
 
-	skb = skb_dequeue(q);
-	if (skb == NULL)
-		return;
+	q = &mac->ack_wait_queue;
+	spin_lock_irqsave(&q->lock, flags);
+
+	skb_queue_walk(q, skb) {
+		struct ieee80211_hdr *tx_hdr;
+		struct ieee80211_tx_info *info;
+		int first_idx, final_idx;
+		const struct tx_retry_rate *retries;
+		u8 final_rate;
+
+		position ++;
+
+		/* if the hardware reports a failure and we had an 802.11 ACK
+		 * pending, then we skip the first skb when searching for a
+		 * matching frame */
+		if (tx_status->failure && mac->ack_pending &&
+		    skb_queue_is_first(q, skb)) {
+			continue;
+		}
+
+		tx_hdr = (struct ieee80211_hdr *)skb->data;
+
+		/* we skip all frames not matching the reported destination */
+		if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+			continue;
+		}
+
+		/* we skip all frames not matching the reported final rate */
 
-	tx_status(hw, skb, 0, 0);
+		info = IEEE80211_SKB_CB(skb);
+		first_idx = info->status.rates[0].idx;
+		ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
+		retries = &zd_retry_rates[first_idx];
+		if (retry < 0 || retry > retries->count) {
+			continue;
+		}
+
+		ZD_ASSERT(0<=retry && retry<=retries->count);
+		final_idx = retries->rate[retry-1];
+		final_rate = zd_rates[final_idx].hw_value;
+
+		if (final_rate != tx_status->rate) {
+			continue;
+		}
+
+		found = 1;
+		break;
+	}
+
+	if (found) {
+		for (i=1; i<=position; i++) {
+			skb = __skb_dequeue(q);
+			zd_mac_tx_status(hw, skb,
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 i == position ? tx_status : NULL);
+			mac->ack_pending = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&q->lock, flags);
 }
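One subtlety in the dequeue loop above, spelled out (inferred from the code):

/*
 * Frames queued ahead of the matched one never got a status report of
 * their own, so they are completed here as well, but only with the
 * pending 802.11 ACK information (tx_status == NULL); the reported
 * tx_status is applied solely to the matched entry (i == position).
 */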
 
 /**
@@ -342,18 +467,27 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hw *hw = info->rate_driver_data[0];
+	struct zd_mac *mac = zd_hw_mac(hw);
+
+	ieee80211_tx_info_clear_status(info);
 
 	skb_pull(skb, sizeof(struct zd_ctrlset));
 	if (unlikely(error ||
 	    (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
-		tx_status(hw, skb, 0, !error);
+		/*
+		 * FIXME: do we need to fill in anything?
+		 */
+		ieee80211_tx_status_irqsafe(hw, skb);
 	} else {
-		struct sk_buff_head *q =
-			&zd_hw_mac(hw)->ack_wait_queue;
+		struct sk_buff_head *q = &mac->ack_wait_queue;
 
 		skb_queue_tail(q, skb);
-		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS)
-			zd_mac_tx_failed(hw);
+		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
+			zd_mac_tx_status(hw, skb_dequeue(q),
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 NULL);
+			mac->ack_pending = 0;
+		}
 	}
 }
 
@@ -606,27 +740,47 @@ fail:
 static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 		      struct ieee80211_rx_status *stats)
 {
+	struct zd_mac *mac = zd_hw_mac(hw);
 	struct sk_buff *skb;
 	struct sk_buff_head *q;
 	unsigned long flags;
+	int found = 0;
+	int i, position = 0;
 
 	if (!ieee80211_is_ack(rx_hdr->frame_control))
 		return 0;
 
-	q = &zd_hw_mac(hw)->ack_wait_queue;
+	q = &mac->ack_wait_queue;
 	spin_lock_irqsave(&q->lock, flags);
 	skb_queue_walk(q, skb) {
 		struct ieee80211_hdr *tx_hdr;
 
+		position ++;
+
+		if (mac->ack_pending && skb_queue_is_first(q, skb))
+		    continue;
+
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
 		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
 		{
-			__skb_unlink(skb, q);
-			tx_status(hw, skb, stats->signal, 1);
-			goto out;
+			found = 1;
+			break;
 		}
 	}
-out:
+
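+	/* complete all frames queued ahead of the match (their ACKs were
+	 * missed) and remember this ACK's signal for the matched frame */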
+	if (found) {
+		for (i = 1; i < position; i++) {
+			skb = __skb_dequeue(q);
+			zd_mac_tx_status(hw, skb,
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 NULL);
+			mac->ack_pending = 0;
+		}
+
+		mac->ack_pending = 1;
+		mac->ack_signal = stats->signal;
+	}
+
 	spin_unlock_irqrestore(&q->lock, flags);
 	return 1;
 }
@@ -709,6 +863,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
 		skb_reserve(skb, 2);
 	}
 
+	/* FIXME: could we avoid this big memcpy? */
 	memcpy(skb_put(skb, length), buffer, length);
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
@@ -999,7 +1154,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 	hw->queues = 1;
 	hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
 
+	/*
+	 * Tell mac80211 that we support multi-rate retries
+	 */
+	hw->max_rates = IEEE80211_TX_MAX_RATES;
+	hw->max_rate_tries = 18;	/* 9 rates * 2 retries/rate */
+
 	skb_queue_head_init(&mac->ack_wait_queue);
+	mac->ack_pending = 0;
 
 	zd_chip_init(&mac->chip, hw, intf);
 	housekeeping_init(mac);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 7c2759118d13..630c298a730e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -140,6 +140,21 @@ struct rx_status {
 #define ZD_RX_CRC16_ERROR		0x40
 #define ZD_RX_ERROR			0x80
 
+struct tx_retry_rate {
+	int count;	/* number of valid elements in the rate[] array */
+	int rate[10];	/* retry rates, each an index into zd_rates[] */
+};
+
+struct tx_status {
+	u8 type;	/* must always be 0x01 (USB_INT_TYPE) */
+	u8 id;		/* must always be 0xa0 (USB_INT_ID_RETRY_FAILED) */
+	u8 rate;
+	u8 pad;
+	u8 mac[ETH_ALEN];
+	u8 retry;	/* retry count reported by the device */
+	u8 failure;	/* non-zero if the transmission failed */
+} __attribute__((packed));
+
 enum mac_flags {
 	MAC_FIXED_CHANNEL = 0x01,
 };
@@ -150,7 +165,7 @@ struct housekeeping {
 
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
-#define ZD_MAC_MAX_ACK_WAITERS 10
+#define ZD_MAC_MAX_ACK_WAITERS 50
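+/* max frames kept queued while waiting for a matching tx-status or ACK */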
 
 struct zd_mac {
 	struct zd_chip chip;
@@ -184,6 +199,12 @@ struct zd_mac {
 
 	/* whether to pass control frames to stack */
 	unsigned int pass_ctrl:1;
+
+	/* whether we have received an 802.11 ACK that is pending */
+	unsigned int ack_pending:1;
+
+	/* signal strength of the last 802.11 ACK received */
+	int ack_signal;
 };
 
 #define ZD_REGDOMAIN_FCC	0x10
@@ -279,7 +300,7 @@ int zd_mac_preinit_hw(struct ieee80211_hw *hw);
 int zd_mac_init_hw(struct ieee80211_hw *hw);
 
 int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
-void zd_mac_tx_failed(struct ieee80211_hw *hw);
+void zd_mac_tx_failed(struct urb *urb);
 void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
 
 #ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 23a6a6d4863b..ac19ecd19cfe 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -318,6 +318,13 @@ error:
 	return r;
 }
 
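+/* declare the firmware images this driver may request, so userspace tools
+ * (e.g. modinfo) can list them */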
+MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ur");
+MODULE_FIRMWARE(FW_ZD1211_PREFIX "ur");
+MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ub");
+MODULE_FIRMWARE(FW_ZD1211_PREFIX "ub");
+MODULE_FIRMWARE(FW_ZD1211B_PREFIX "uphr");
+MODULE_FIRMWARE(FW_ZD1211_PREFIX "uphr");
+
 /* Read data from device address space using "firmware interface" which does
  * not require firmware to be loaded. */
 int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
@@ -419,7 +426,7 @@ static void int_urb_complete(struct urb *urb)
 		handle_regs_int(urb);
 		break;
 	case USB_INT_ID_RETRY_FAILED:
-		zd_mac_tx_failed(zd_usb_to_hw(urb->context));
+		zd_mac_tx_failed(urb);
 		break;
 	default:
 		dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
@@ -553,6 +560,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
 
 	if (length < sizeof(struct rx_length_info)) {
 		/* It's not a complete packet anyhow. */
+		printk(KERN_DEBUG "%s: invalid, small RX packet: %d\n",
+		       __func__, length);
 		return;
 	}
 	length_info = (struct rx_length_info *)
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 83a044dbd1d7..8c777ba4e2b3 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -660,7 +660,7 @@ static int xemaclite_open(struct net_device *dev)
 	xemaclite_set_mac_address(lp, dev->dev_addr);
 
 	/* Grab the IRQ */
-	retval = request_irq(dev->irq, &xemaclite_interrupt, 0, dev->name, dev);
+	retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
 	if (retval) {
 		dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
 			dev->irq);
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index 0c44135c0b1f..389ba9df7120 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -92,7 +92,7 @@ static unsigned short known_revisions[] =
 
 static int xtsonic_open(struct net_device *dev)
 {
-	if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
+	if (request_irq(dev->irq,sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
 		       dev->name, dev->irq);
 		return -EAGAIN;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 40ad0dee0406..0f773a9a3ff2 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -579,7 +579,7 @@ static int yellowfin_open(struct net_device *dev)
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 	if (ret)
 		return ret;
 
@@ -944,8 +944,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 			dev_kfree_skb_irq(skb);
 			yp->tx_skbuff[entry] = NULL;
 		}
-		if (yp->tx_full
-			&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
+		if (yp->tx_full &&
+		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 			/* The ring is no longer full, clear tbusy. */
 			yp->tx_full = 0;
 			netif_wake_queue(dev);
@@ -1014,8 +1014,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 			}
 #endif
 
-			if (yp->tx_full
-				&& yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
+			if (yp->tx_full &&
+			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
 				/* The ring is no longer full, clear tbusy. */
 				yp->tx_full = 0;
 				netif_wake_queue(dev);
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index b42347333750..bc5ae0f6e934 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -103,8 +103,7 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 
-/* This include could be elsewhere, since it is not wireless specific */
-#include "wireless/i82593.h"
+#include <linux/i82593.h>
 
 static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
 
@@ -170,7 +169,7 @@ static int znet_request_resources (struct net_device *dev)
 {
 	struct znet_private *znet = netdev_priv(dev);
 
-	if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
+	if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
 		goto failed;
 	if (request_dma (znet->rx_dma, "ZNet rx"))
 		goto free_irq;
@@ -698,8 +697,8 @@ static void znet_rx(struct net_device *dev)
 	   the same area of the backwards links we now have.  This allows us to
 	   pass packets to the upper layers in the order they were received --
 	   important for fast-path sequential operations. */
-	 while (znet->rx_start + cur_frame_end_offset != znet->rx_cur
-			&& ++boguscount < 5) {
+	while (znet->rx_start + cur_frame_end_offset != znet->rx_cur &&
+	       ++boguscount < 5) {
 		unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
 		int count, status;
 
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 9581d3619450..79caf1ca4a29 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -352,11 +352,9 @@ static __inline__ int led_get_net_activity(void)
 
 	rx_total = tx_total = 0;
 	
-	/* we are running as a workqueue task, so locking dev_base 
-	 * for reading should be OK */
-	read_lock(&dev_base_lock);
+	/* we are running as a workqueue task, so we can use an RCU lookup */
 	rcu_read_lock();
-	for_each_netdev(&init_net, dev) {
+	for_each_netdev_rcu(&init_net, dev) {
 	    const struct net_device_stats *stats;
 	    struct in_device *in_dev = __in_dev_get_rcu(dev);
 	    if (!in_dev || !in_dev->ifa_list)
@@ -368,7 +366,6 @@ static __inline__ int led_get_net_activity(void)
 	    tx_total += stats->tx_packets;
 	}
 	rcu_read_unlock();
-	read_unlock(&dev_base_lock);
 
 	retval = 0;
 
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d08..6cab5a62f99e 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,11 @@
 #
 
 ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
-obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
-obj-$(CONFIG_LCS) += lcs.o cu3088.o
-obj-$(CONFIG_CLAW) += claw.o cu3088.o
+obj-$(CONFIG_LCS) += lcs.o
+obj-$(CONFIG_CLAW) += claw.o
 qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb698..3c77bfe0764c 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
 #include <linux/timer.h>
 #include <linux/types.h>
 
-#include "cu3088.h"
 #include "claw.h"
 
 /*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
 	return -EPERM;
 }
 
+/* the root device for claw group devices */
+static struct device *claw_root_dev;
+
 /* ccwgroup table  */
 
 static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
 	.prepare     = claw_pm_prepare,
 };
 
+static struct ccw_device_id claw_ids[] = {
+	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, claw_ids);
+
+static struct ccw_driver claw_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "claw",
+	.ids	= claw_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
+static ssize_t
+claw_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(claw_root_dev,
+					  claw_group_driver.driver_id,
+					  &claw_ccw_driver, 3, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
+
+static struct attribute *claw_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group claw_group_attr_group = {
+	.attrs = claw_group_attrs,
+};
+
+static const struct attribute_group *claw_group_attr_groups[] = {
+	&claw_group_attr_group,
+	NULL,
+};
+
 /*
 *       Key functions
 */
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
 static void __exit
 claw_cleanup(void)
 {
-	unregister_cu3088_discipline(&claw_group_driver);
+	driver_remove_file(&claw_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&claw_group_driver);
+	ccw_driver_unregister(&claw_ccw_driver);
+	root_device_unregister(claw_root_dev);
 	claw_unregister_debug_facility();
 	pr_info("Driver unloaded\n");
 
@@ -3348,16 +3395,31 @@ claw_init(void)
 	if (ret) {
 		pr_err("Registering with the S/390 debug feature"
 			" failed with error code %d\n", ret);
-		return ret;
+		goto out_err;
 	}
 	CLAW_DBF_TEXT(2, setup, "init_mod");
-	ret = register_cu3088_discipline(&claw_group_driver);
-	if (ret) {
-		CLAW_DBF_TEXT(2, setup, "init_bad");
-		claw_unregister_debug_facility();
-		pr_err("Registering with the cu3088 device driver failed "
-			   "with error code %d\n", ret);
-	}
+	claw_root_dev = root_device_register("claw");
+	ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&claw_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	claw_group_driver.driver.groups = claw_group_attr_groups;
+	ret = ccwgroup_driver_register(&claw_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&claw_ccw_driver);
+ccw_err:
+	root_device_unregister(claw_root_dev);
+register_err:
+	CLAW_DBF_TEXT(2, setup, "init_bad");
+	claw_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the claw device driver failed\n");
 	return ret;
 }
 
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d3..46d59a13db12 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
 		} \
 	} while (0)
 
+/**
+ * Enum for classifying detected devices.
+ */
+enum claw_channel_types {
+	/* Device is not a channel  */
+	claw_channel_type_none,
+
+	/* Device is a CLAW channel device */
+	claw_channel_type_claw
+};
+
 /*******************************************************
 *  Define Control Blocks                               *
 *                                                      *
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5ef..70eb7f138414 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
 #include <asm/idals.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 
 #include "ctcm_dbug.h"
 #include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807a..046d077fabbb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
 #include <asm/idals.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_main.h"
 
 /*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c5b83874500c..e35713dd0504 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@
 
 #include <asm/idals.h>
 
-#include "cu3088.h"
 #include "ctcm_fsms.h"
 #include "ctcm_main.h"
 
 /* Some common global variables */
 
+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
 /*
  * Linked list of all detected channels.
  */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
  *
  * returns Pointer to a channel or NULL if no matching channel available.
  */
-static struct channel *channel_get(enum channel_types type,
+static struct channel *channel_get(enum ctcm_channel_types type,
 					char *id, int direction)
 {
 	struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
  *
  * returns 0 on success, !0 on error.
  */
-static int add_channel(struct ccw_device *cdev, enum channel_types type,
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
 				struct ctcm_priv *priv)
 {
 	struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return:	/* note that all channel pointers are 0 or valid */
 /*
  * Return type of a detected device.
  */
-static enum channel_types get_channel_type(struct ccw_device_id *id)
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
 {
-	enum channel_types type;
-	type = (enum channel_types)id->driver_info;
+	enum ctcm_channel_types type;
+	type = (enum ctcm_channel_types)id->driver_info;
 
-	if (type == channel_type_ficon)
-		type = channel_type_escon;
+	if (type == ctcm_channel_type_ficon)
+		type = ctcm_channel_type_escon;
 
 	return type;
 }
@@ -1525,16 +1529,21 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	char read_id[CTCM_ID_SIZE];
 	char write_id[CTCM_ID_SIZE];
 	int direction;
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	struct ctcm_priv *priv;
 	struct net_device *dev;
 	struct ccw_device *cdev0;
 	struct ccw_device *cdev1;
+	struct channel *readc;
+	struct channel *writec;
 	int ret;
+	int result;
 
 	priv = dev_get_drvdata(&cgdev->dev);
-	if (!priv)
-		return -ENODEV;
+	if (!priv) {
+		result = -ENODEV;
+		goto out_err_result;
+	}
 
 	cdev0 = cgdev->cdev[0];
 	cdev1 = cgdev->cdev[1];
@@ -1545,31 +1554,40 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
 
 	ret = add_channel(cdev0, type, priv);
-	if (ret)
-		return ret;
+	if (ret) {
+		result = ret;
+		goto out_err_result;
+	}
 	ret = add_channel(cdev1, type, priv);
-	if (ret)
-		return ret;
+	if (ret) {
+		result = ret;
+		goto out_remove_channel1;
+	}
 
 	ret = ccw_device_set_online(cdev0);
 	if (ret != 0) {
-		/* may be ok to fail now - can be done later */
 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 			"%s(%s) set_online rc=%d",
 				CTCM_FUNTAIL, read_id, ret);
+		result = -EIO;
+		goto out_remove_channel2;
 	}
 
 	ret = ccw_device_set_online(cdev1);
 	if (ret != 0) {
-		/* may be ok to fail now - can be done later */
 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 			"%s(%s) set_online rc=%d",
 				CTCM_FUNTAIL, write_id, ret);
+
+		result = -EIO;
+		goto out_ccw1;
 	}
 
 	dev = ctcm_init_netdevice(priv);
-	if (dev == NULL)
-			goto out;
+	if (dev == NULL) {
+		result = -ENODEV;
+		goto out_ccw2;
+	}
 
 	for (direction = READ; direction <= WRITE; direction++) {
 		priv->channel[direction] =
@@ -1587,12 +1605,14 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	/* sysfs magic */
 	SET_NETDEV_DEV(dev, &cgdev->dev);
 
-	if (register_netdev(dev))
-			goto out_dev;
+	if (register_netdev(dev)) {
+		result = -ENODEV;
+		goto out_dev;
+	}
 
 	if (ctcm_add_attributes(&cgdev->dev)) {
-		unregister_netdev(dev);
-			goto out_dev;
+		result = -ENODEV;
+		goto out_unregister;
 	}
 
 	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
@@ -1608,13 +1628,22 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 			priv->channel[WRITE]->id, priv->protocol);
 
 	return 0;
+out_unregister:
+	unregister_netdev(dev);
 out_dev:
 	ctcm_free_netdevice(dev);
-out:
+out_ccw2:
 	ccw_device_set_offline(cgdev->cdev[1]);
+out_ccw1:
 	ccw_device_set_offline(cgdev->cdev[0]);
-
-	return -ENODEV;
+out_remove_channel2:
+	readc = channel_get(type, read_id, READ);
+	channel_remove(readc);
+out_remove_channel1:
+	writec = channel_get(type, write_id, WRITE);
+	channel_remove(writec);
+out_err_result:
+	return result;
 }
 
 /**
@@ -1695,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
 		return 0;
 	netif_device_detach(priv->channel[READ]->netdev);
 	ctcm_close(priv->channel[READ]->netdev);
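+	/* wait up to 5 seconds for the device FSM to reach STOPPED before
+	 * taking the channels offline */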
+	if (!wait_event_timeout(priv->fsm->wait_q,
+	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
+		netif_device_attach(priv->channel[READ]->netdev);
+		return -EBUSY;
+	}
 	ccw_device_set_offline(gdev->cdev[1]);
 	ccw_device_set_offline(gdev->cdev[0]);
 	return 0;
@@ -1719,6 +1753,22 @@ err_out:
 	return rc;
 }
 
+static struct ccw_device_id ctcm_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, ctcm_ids);
+
+static struct ccw_driver ctcm_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "ctcm",
+	.ids	= ctcm_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
 static struct ccwgroup_driver ctcm_group_driver = {
 	.owner       = THIS_MODULE,
 	.name        = CTC_DRIVER_NAME,
@@ -1733,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
 	.restore     = ctcm_pm_resume,
 };
 
+static ssize_t
+ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+
+	err = ccwgroup_create_from_string(ctcm_root_dev,
+					  ctcm_group_driver.driver_id,
+					  &ctcm_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
+
+static struct attribute *ctcm_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group ctcm_group_attr_group = {
+	.attrs = ctcm_group_attrs,
+};
+
+static const struct attribute_group *ctcm_group_attr_groups[] = {
+	&ctcm_group_attr_group,
+	NULL,
+};
 
 /*
  * Module related routines
@@ -1746,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
  */
 static void __exit ctcm_exit(void)
 {
-	unregister_cu3088_discipline(&ctcm_group_driver);
+	driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
+	ccwgroup_driver_unregister(&ctcm_group_driver);
+	ccw_driver_unregister(&ctcm_ccw_driver);
+	root_device_unregister(ctcm_root_dev);
 	ctcm_unregister_dbf_views();
 	pr_info("CTCM driver unloaded\n");
 }
@@ -1772,17 +1852,31 @@ static int __init ctcm_init(void)
 	channels = NULL;
 
 	ret = ctcm_register_dbf_views();
-	if (ret) {
-		return ret;
-	}
-	ret = register_cu3088_discipline(&ctcm_group_driver);
-	if (ret) {
-		ctcm_unregister_dbf_views();
-		pr_err("%s / register_cu3088_discipline failed, ret = %d\n",
-			__func__, ret);
-		return ret;
-	}
+	if (ret)
+		goto out_err;
+	ctcm_root_dev = root_device_register("ctcm");
+	ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&ctcm_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
+	ret = ccwgroup_driver_register(&ctcm_group_driver);
+	if (ret)
+		goto ccwgroup_err;
 	print_banner();
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&ctcm_ccw_driver);
+ccw_err:
+	root_device_unregister(ctcm_root_dev);
+register_err:
+	ctcm_unregister_dbf_views();
+out_err:
+	pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
+		__func__, ret);
 	return ret;
 }
 
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d8..d34fa14f44e7 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
 #include <linux/netdevice.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_dbug.h"
 #include "ctcm_mpc.h"
 
@@ -66,6 +65,23 @@
 			ctcmpc_dumpit(buf, len); \
 	} while (0)
 
+/**
+ * Enum for classifying detected devices
+ */
+enum ctcm_channel_types {
+	/* Device is not a channel  */
+	ctcm_channel_type_none,
+
+	/* Device is a CTC/A */
+	ctcm_channel_type_parallel,
+
+	/* Device is a FICON channel */
+	ctcm_channel_type_ficon,
+
+	/* Device is an ESCON channel */
+	ctcm_channel_type_escon
+};
+
 /*
  * CCW commands, used in this driver.
  */
@@ -121,7 +137,7 @@ struct channel {
 	 * Type of this channel.
 	 * CTC/A or Escon for valid channels.
 	 */
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	/*
 	 * Misc. flags. See CHANNEL_FLAGS_... below
 	 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8f..5978b390153f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
 #include <linux/moduleparam.h>
 #include <asm/idals.h>
 
-#include "cu3088.h"
 #include "ctcm_mpc.h"
 #include "ctcm_main.h"
 #include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d68..738ad26c74a7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -158,6 +158,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
 	return count;
 }
 
+const char *ctcm_type[] = {
+	"not a channel",
+	"CTC/A",
+	"FICON channel",
+	"ESCON channel",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
 static ssize_t ctcm_type_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -168,7 +177,7 @@ static ssize_t ctcm_type_show(struct device *dev,
 		return -ENODEV;
 
 	return sprintf(buf, "%s\n",
-			cu3088_type[cgdev->cdev[0]->id.driver_info]);
+			ctcm_type[cgdev->cdev[0]->id.driver_info]);
 }
 
 static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99b..000000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * CTC / LCS ccw_device driver
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Arnd Bergmann <arndb@de.ibm.com>
- *            Cornelia Huck <cornelia.huck@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/err.h>
-
-#include <asm/ccwdev.h>
-#include <asm/ccwgroup.h>
-
-#include "cu3088.h"
-
-const char *cu3088_type[] = {
-	"not a channel",
-	"CTC/A",
-	"ESCON channel",
-	"FICON channel",
-	"OSA LCS card",
-	"CLAW channel device",
-	"unknown channel type",
-	"unsupported channel type",
-};
-
-/* static definitions */
-
-static struct ccw_device_id cu3088_ids[] = {
-	{ CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
-	{ CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
-	{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
-	{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
-	{ CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
-	{ /* end of list */ }
-};
-
-static struct ccw_driver cu3088_driver;
-
-static struct device *cu3088_root_dev;
-
-static ssize_t
-group_write(struct device_driver *drv, const char *buf, size_t count)
-{
-	int ret;
-	struct ccwgroup_driver *cdrv;
-
-	cdrv = to_ccwgroupdrv(drv);
-	if (!cdrv)
-		return -EINVAL;
-	ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
-					  &cu3088_driver, 2, buf);
-
-	return (ret == 0) ? count : ret;
-}
-
-static DRIVER_ATTR(group, 0200, NULL, group_write);
-
-/* Register-unregister for ctc&lcs */
-int
-register_cu3088_discipline(struct ccwgroup_driver *dcp)
-{
-	int rc;
-
-	if (!dcp)
-		return -EINVAL;
-
-	/* Register discipline.*/
-	rc = ccwgroup_driver_register(dcp);
-	if (rc)
-		return rc;
-
-	rc = driver_create_file(&dcp->driver, &driver_attr_group);
-	if (rc)
-		ccwgroup_driver_unregister(dcp);
-
-	return rc;
-
-}
-
-void
-unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
-{
-	if (!dcp)
-		return;
-
-	driver_remove_file(&dcp->driver, &driver_attr_group);
-	ccwgroup_driver_unregister(dcp);
-}
-
-static struct ccw_driver cu3088_driver = {
-	.owner	     = THIS_MODULE,
-	.ids	     = cu3088_ids,
-	.name        = "cu3088",
-	.probe	     = ccwgroup_probe_ccwdev,
-	.remove	     = ccwgroup_remove_ccwdev,
-};
-
-/* module setup */
-static int __init
-cu3088_init (void)
-{
-	int rc;
-
-	cu3088_root_dev = root_device_register("cu3088");
-	if (IS_ERR(cu3088_root_dev))
-		return PTR_ERR(cu3088_root_dev);
-	rc = ccw_driver_register(&cu3088_driver);
-	if (rc)
-		root_device_unregister(cu3088_root_dev);
-
-	return rc;
-}
-
-static void __exit
-cu3088_exit (void)
-{
-	ccw_driver_unregister(&cu3088_driver);
-	root_device_unregister(cu3088_root_dev);
-}
-
-MODULE_DEVICE_TABLE(ccw,cu3088_ids);
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_LICENSE("GPL");
-
-module_init(cu3088_init);
-module_exit(cu3088_exit);
-
-EXPORT_SYMBOL_GPL(cu3088_type);
-EXPORT_SYMBOL_GPL(register_cu3088_discipline);
-EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a5..000000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _CU3088_H
-#define _CU3088_H
-
-/**
- * Enum for classifying detected devices.
- */
-enum channel_types {
-        /* Device is not a channel  */
-	channel_type_none,
-
-        /* Device is a CTC/A */
-	channel_type_parallel,
-
-	/* Device is a ESCON channel */
-	channel_type_escon,
-
-	/* Device is a FICON channel */
-	channel_type_ficon,
-
-	/* Device is a OSA2 card */
-	channel_type_osa2,
-
-	/* Device is a CLAW channel device */
-	channel_type_claw,
-
-	/* Device is a channel, but we don't know
-	 * anything about it */
-	channel_type_unknown,
-
-	/* Device is an unsupported model */
-	channel_type_unsupported,
-
-	/* number of type entries */
-	num_channel_types
-};
-
-extern const char *cu3088_type[num_channel_types];
-extern int register_cu3088_discipline(struct ccwgroup_driver *);
-extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
-
-#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7c..cae48cbc5e96 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -27,6 +27,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
 		return NULL;
 	}
 	strlcpy(this->name, name, sizeof(this->name));
+	init_waitqueue_head(&this->wait_q);
 
 	f = kzalloc(sizeof(fsm), order);
 	if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1bd..1e8b235d95b5 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
 	char name[16];
 	void *userdata;
 	int userint;
+	wait_queue_head_t wait_q;
 #if FSM_DEBUG_HISTORY
 	int         history_index;
 	int         history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
 	printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
 		fi->f->state_names[newstate]);
 #endif
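+	/* notify waiters (e.g. ctcm_pm_suspend) that the state has changed */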
+	wake_up(&fi->wait_q);
 }
 
 /**
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a70de9b4bf29..f6cc46dc0501 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -47,7 +47,6 @@
 #include <asm/ccwgroup.h>
 
 #include "lcs.h"
-#include "cu3088.h"
 
 
 #if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +59,11 @@
  */
 
 static char version[] __initdata = "LCS driver";
-static char debug_buffer[255];
+
+/**
+ * The root device for lcs group devices
+ */
+static struct device *lcs_root_dev;
 
 /**
  * Some prototypes.
@@ -76,6 +79,7 @@ static int lcs_recovery(void *ptr);
 /**
  * Debug Facility Stuff
  */
+static char debug_buffer[255];
 static debug_info_t *lcs_dbf_setup;
 static debug_info_t *lcs_dbf_trace;
 
@@ -889,7 +893,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
 	rc = lcs_ready_buffer(&card->write, buffer);
 	if (rc)
 		return rc;
-	init_timer(&timer);
+	init_timer_on_stack(&timer);
 	timer.function = lcs_lancmd_timeout;
 	timer.data = (unsigned long) reply;
 	timer.expires = jiffies + HZ*card->lancmd_timeout;
@@ -1968,6 +1972,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
 
 static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
 
+const char *lcs_type[] = {
+	"not a channel",
+	"2216 parallel",
+	"2216 channel",
+	"OSA LCS card",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
 static ssize_t
 lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -1977,7 +1990,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
 	if (!cgdev)
 		return -ENODEV;
 
-	return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
+	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
 }
 
 static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2130,8 +2143,12 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
 	card->write.ccwdev = ccwgdev->cdev[1];
 
 	recover_state = card->state;
-	ccw_device_set_online(card->read.ccwdev);
-	ccw_device_set_online(card->write.ccwdev);
+	rc = ccw_device_set_online(card->read.ccwdev);
+	if (rc)
+		goto out_err;
+	rc = ccw_device_set_online(card->write.ccwdev);
+	if (rc)
+		goto out_werr;
 
 	LCS_DBF_TEXT(3, setup, "lcsnewdv");
 
@@ -2210,8 +2227,10 @@ netdev_out:
 	return 0;
 out:
 
-	ccw_device_set_offline(card->read.ccwdev);
 	ccw_device_set_offline(card->write.ccwdev);
+out_werr:
+	ccw_device_set_offline(card->read.ccwdev);
+out_err:
 	return -ENODEV;
 }
 
@@ -2364,6 +2383,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
 	return lcs_pm_resume(card);
 }
 
+static struct ccw_device_id lcs_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
+	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, lcs_ids);
+
+static struct ccw_driver lcs_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "lcs",
+	.ids	= lcs_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
 /**
  * LCS ccwgroup driver registration
  */
@@ -2383,6 +2418,33 @@ static struct ccwgroup_driver lcs_group_driver = {
 	.restore     = lcs_restore,
 };
 
+static ssize_t
+lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
+		       size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(lcs_root_dev,
+					  lcs_group_driver.driver_id,
+					  &lcs_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
+
+static struct attribute *lcs_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group lcs_group_attr_group = {
+	.attrs = lcs_group_attrs,
+};
+
+static const struct attribute_group *lcs_group_attr_groups[] = {
+	&lcs_group_attr_group,
+	NULL,
+};
+
 /**
  *  LCS Module/Kernel initialization function
  */
@@ -2394,17 +2456,30 @@ __init lcs_init_module(void)
 	pr_info("Loading %s\n", version);
 	rc = lcs_register_debug_facility();
 	LCS_DBF_TEXT(0, setup, "lcsinit");
-	if (rc) {
-		pr_err("Initialization failed\n");
-		return rc;
-	}
-
-	rc = register_cu3088_discipline(&lcs_group_driver);
-	if (rc) {
-		pr_err("Initialization failed\n");
-		return rc;
-	}
+	if (rc)
+		goto out_err;
+	lcs_root_dev = root_device_register("lcs");
+	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
+	if (rc)
+		goto register_err;
+	rc = ccw_driver_register(&lcs_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	lcs_group_driver.driver.groups = lcs_group_attr_groups;
+	rc = ccwgroup_driver_register(&lcs_group_driver);
+	if (rc)
+		goto ccwgroup_err;
 	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&lcs_ccw_driver);
+ccw_err:
+	root_device_unregister(lcs_root_dev);
+register_err:
+	lcs_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the lcs device driver failed\n");
+	return rc;
 }
 
 
@@ -2416,7 +2491,11 @@ __exit lcs_cleanup_module(void)
 {
 	pr_info("Terminating lcs module.\n");
 	LCS_DBF_TEXT(0, trace, "cleanup");
-	unregister_cu3088_discipline(&lcs_group_driver);
+	driver_remove_file(&lcs_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&lcs_group_driver);
+	ccw_driver_unregister(&lcs_ccw_driver);
+	root_device_unregister(lcs_root_dev);
 	lcs_unregister_debug_facility();
 }
 
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af27..8c03392ac833 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
 #define CARD_FROM_DEV(cdev) \
 	(struct lcs_card *) dev_get_drvdata( \
 		&((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum lcs_channel_types {
+	/* Device is not a channel  */
+	lcs_channel_type_none,
+
+	/* Device is a 2216 parallel channel */
+	lcs_channel_type_parallel,
+
+	/* Device is a 2216 channel */
+	lcs_channel_type_2216,
+
+	/* Device is an OSA2 card */
+	lcs_channel_type_osa2
+};
+
 /**
  * CCW commands used in this driver
  */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..395c04c2b00f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 	if (single_flag) {
 		if ((skb = skb_dequeue(&conn->commit_queue))) {
 			atomic_dec(&skb->users);
-			dev_kfree_skb_any(skb);
 			if (privptr) {
 				privptr->stats.tx_packets++;
 				privptr->stats.tx_bytes +=
 					(skb->len - NETIUCV_HDRLEN
-					 	  - NETIUCV_HDRLEN);
+						  - NETIUCV_HDRLEN);
 			}
+			dev_kfree_skb_any(skb);
 		}
 	}
 	conn->tx_buff->data = conn->tx_buff->head;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 31a2b4e502ce..b232693378cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
 	__u64 outbound_do_qdio_start_time;
 	unsigned int outbound_do_qdio_cnt;
 	unsigned int outbound_do_qdio_time;
-	/* eddp data */
 	unsigned int large_send_bytes;
 	unsigned int large_send_cnt;
 	unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
 	unsigned int sg_frags_rx;
 	unsigned int sg_alloc_page_rx;
 	unsigned int tx_csum;
+	unsigned int tx_lin;
 };
 
 /* Routing stuff */
@@ -648,6 +648,7 @@ struct qeth_card_options {
 	enum qeth_large_send_types large_send;
 	int performance_stats;
 	int rx_sg_cb;
+	enum qeth_ipa_isolation_modes isolation;
 };
 
 /*
@@ -776,7 +777,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
 	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
 }
 
-struct qeth_eddp_context;
 extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
 extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
 const char *qeth_get_cardname_short(struct qeth_card *);
@@ -836,7 +836,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
 int qeth_mdio_read(struct net_device *, int, int);
 int qeth_snmp_command(struct qeth_card *, char __user *);
-int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
 struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
 int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
 					unsigned long);
@@ -849,13 +848,14 @@ int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
 			struct sk_buff *, struct qeth_hdr *, int, int, int);
 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
 		    struct sk_buff *, struct qeth_hdr *, int);
-int qeth_core_get_stats_count(struct net_device *);
+int qeth_core_get_sset_count(struct net_device *, int);
 void qeth_core_get_ethtool_stats(struct net_device *,
 				struct ethtool_stats *, u64 *);
 void qeth_core_get_strings(struct net_device *, u32, u8 *);
 void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
 void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
+int qeth_set_access_ctrl_online(struct qeth_card *card);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c4a42d970158..d34804d5ece1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -270,41 +270,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
 	return qeth_alloc_buffer_pool(card);
 }
 
-int qeth_set_large_send(struct qeth_card *card,
-		enum qeth_large_send_types type)
-{
-	int rc = 0;
-
-	if (card->dev == NULL) {
-		card->options.large_send = type;
-		return 0;
-	}
-	if (card->state == CARD_STATE_UP)
-		netif_tx_disable(card->dev);
-	card->options.large_send = type;
-	switch (card->options.large_send) {
-	case QETH_LARGE_SEND_TSO:
-		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
-			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
-						NETIF_F_HW_CSUM;
-		} else {
-			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
-						NETIF_F_HW_CSUM);
-			card->options.large_send = QETH_LARGE_SEND_NO;
-			rc = -EOPNOTSUPP;
-		}
-		break;
-	default: /* includes QETH_LARGE_SEND_NO */
-		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
-					NETIF_F_HW_CSUM);
-		break;
-	}
-	if (card->state == CARD_STATE_UP)
-		netif_wake_queue(card->dev);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qeth_set_large_send);
-
 static int qeth_issue_next_read(struct qeth_card *card)
 {
 	int rc;
@@ -1079,6 +1044,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
 	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
 	card->options.performance_stats = 0;
 	card->options.rx_sg_cb = QETH_RX_SG_CB;
+	card->options.isolation = ISOLATION_MODE_NONE;
 }
 
 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -3389,6 +3355,156 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
 
+static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_set_access_ctrl *access_ctrl_req;
+	int rc;
+
+	QETH_DBF_TEXT(TRACE, 4, "setaccb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
+	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+	QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
+		cmd->data.setadapterparms.hdr.return_code);
+	switch (cmd->data.setadapterparms.hdr.return_code) {
+	case SET_ACCESS_CTRL_RC_SUCCESS:
+	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
+	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
+	{
+		card->options.isolation = access_ctrl_req->subcmd_code;
+		if (card->options.isolation == ISOLATION_MODE_NONE) {
+			dev_info(&card->gdev->dev,
+			    "QDIO data connection isolation is deactivated\n");
+		} else {
+			dev_info(&card->gdev->dev,
+			    "QDIO data connection isolation is activated\n");
+		}
+		QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		rc = 0;
+		break;
+	}
+	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
+	{
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		rc = -EOPNOTSUPP;
+		break;
+	}
+	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
+	{
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		dev_err(&card->gdev->dev,
+			"Adapter is dedicated. "
+			"QDIO data connection isolation not supported\n");
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		rc = -EOPNOTSUPP;
+		break;
+	}
+	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
+	{
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		dev_err(&card->gdev->dev,
+			"TSO does not permit QDIO data connection isolation\n");
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		rc = -EPERM;
+		break;
+	}
+	default:
+	{
+		/* this should never happen */
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
+			"==UNKNOWN\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		rc = 0;
+		break;
+	}
+	}
+	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+	return rc;
+}
+
+static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+		enum qeth_ipa_isolation_modes isolation)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_set_access_ctrl *access_ctrl_req;
+
+	QETH_DBF_TEXT(TRACE, 4, "setacctl");
+
+	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
+	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
+				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+				   sizeof(struct qeth_set_access_ctrl));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+	access_ctrl_req->subcmd_code = isolation;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
+			       NULL);
+	QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
+	return rc;
+}
+
+int qeth_set_access_ctrl_online(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(TRACE, 4, "setactlo");
+
+	if (card->info.type == QETH_CARD_TYPE_OSAE &&
+	    qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+		rc = qeth_setadpparms_set_access_ctrl(card,
+			card->options.isolation);
+		if (rc) {
+			QETH_DBF_MESSAGE(3,
+				"IPA(SET_ACCESS_CTRL,%s,%d) send failed",
+				card->gdev->dev.kobj.name,
+				rc);
+		}
+	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
+		card->options.isolation = ISOLATION_MODE_NONE;
+
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		rc = -EOPNOTSUPP;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
+
 void qeth_tx_timeout(struct net_device *dev)
 {
 	struct qeth_card *card;
@@ -3732,30 +3848,36 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
 int qeth_core_hardsetup_card(struct qeth_card *card)
 {
 	struct qdio_ssqd_desc *ssqd;
-	int retries = 3;
+	int retries = 0;
 	int mpno = 0;
 	int rc;
 
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
 retry:
-	if (retries < 3) {
+	if (retries)
 		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
 			dev_name(&card->gdev->dev));
-		ccw_device_set_offline(CARD_DDEV(card));
-		ccw_device_set_offline(CARD_WDEV(card));
-		ccw_device_set_offline(CARD_RDEV(card));
-		ccw_device_set_online(CARD_RDEV(card));
-		ccw_device_set_online(CARD_WDEV(card));
-		ccw_device_set_online(CARD_DDEV(card));
-	}
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	rc = ccw_device_set_online(CARD_RDEV(card));
+	if (rc)
+		goto retriable;
+	rc = ccw_device_set_online(CARD_WDEV(card));
+	if (rc)
+		goto retriable;
+	rc = ccw_device_set_online(CARD_DDEV(card));
+	if (rc)
+		goto retriable;
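+	/* all three channels are online at this point; try to clear the card */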
 	rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+retriable:
 	if (rc == -ERESTARTSYS) {
 		QETH_DBF_TEXT(SETUP, 2, "break1");
 		return rc;
 	} else if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		if (--retries < 0)
+		if (++retries > 3)
 			goto out;
 		else
 			goto retry;
@@ -4303,13 +4425,19 @@ static struct {
 	{"tx do_QDIO time"},
 	{"tx do_QDIO count"},
 	{"tx csum"},
+	{"tx lin"},
 };
 
-int qeth_core_get_stats_count(struct net_device *dev)
+int qeth_core_get_sset_count(struct net_device *dev, int stringset)
 {
-	return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
+	switch (stringset) {
+	case ETH_SS_STATS:
+		return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
+	default:
+		return -EINVAL;
+	}
 }
-EXPORT_SYMBOL_GPL(qeth_core_get_stats_count);
+EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
 
 void qeth_core_get_ethtool_stats(struct net_device *dev,
 		struct ethtool_stats *stats, u64 *data)
@@ -4355,6 +4483,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[31] = card->perf_stats.outbound_do_qdio_time;
 	data[32] = card->perf_stats.outbound_do_qdio_cnt;
 	data[33] = card->perf_stats.tx_csum;
+	data[34] = card->perf_stats.tx_lin;
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
 
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e85..52c03438dbec 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -234,18 +234,19 @@ enum qeth_ipa_setdelip_flags {
 
 /* SETADAPTER IPA Command: ****************************************************/
 enum qeth_ipa_setadp_cmd {
-	IPA_SETADP_QUERY_COMMANDS_SUPPORTED	= 0x0001,
-	IPA_SETADP_ALTER_MAC_ADDRESS		= 0x0002,
-	IPA_SETADP_ADD_DELETE_GROUP_ADDRESS	= 0x0004,
-	IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR	= 0x0008,
-	IPA_SETADP_SET_ADDRESSING_MODE		= 0x0010,
-	IPA_SETADP_SET_CONFIG_PARMS		= 0x0020,
-	IPA_SETADP_SET_CONFIG_PARMS_EXTENDED	= 0x0040,
-	IPA_SETADP_SET_BROADCAST_MODE		= 0x0080,
-	IPA_SETADP_SEND_OSA_MESSAGE		= 0x0100,
-	IPA_SETADP_SET_SNMP_CONTROL		= 0x0200,
-	IPA_SETADP_QUERY_CARD_INFO		= 0x0400,
-	IPA_SETADP_SET_PROMISC_MODE		= 0x0800,
+	IPA_SETADP_QUERY_COMMANDS_SUPPORTED	= 0x00000001L,
+	IPA_SETADP_ALTER_MAC_ADDRESS		= 0x00000002L,
+	IPA_SETADP_ADD_DELETE_GROUP_ADDRESS	= 0x00000004L,
+	IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR	= 0x00000008L,
+	IPA_SETADP_SET_ADDRESSING_MODE		= 0x00000010L,
+	IPA_SETADP_SET_CONFIG_PARMS		= 0x00000020L,
+	IPA_SETADP_SET_CONFIG_PARMS_EXTENDED	= 0x00000040L,
+	IPA_SETADP_SET_BROADCAST_MODE		= 0x00000080L,
+	IPA_SETADP_SEND_OSA_MESSAGE		= 0x00000100L,
+	IPA_SETADP_SET_SNMP_CONTROL		= 0x00000200L,
+	IPA_SETADP_QUERY_CARD_INFO		= 0x00000400L,
+	IPA_SETADP_SET_PROMISC_MODE		= 0x00000800L,
+	IPA_SETADP_SET_ACCESS_CONTROL		= 0x00010000L,
 };
 enum qeth_ipa_mac_ops {
 	CHANGE_ADDR_READ_MAC		= 0,
@@ -264,6 +265,20 @@ enum qeth_ipa_promisc_modes {
 	SET_PROMISC_MODE_OFF		= 0,
 	SET_PROMISC_MODE_ON		= 1,
 };
+enum qeth_ipa_isolation_modes {
+	ISOLATION_MODE_NONE		= 0x00000000L,
+	ISOLATION_MODE_FWD		= 0x00000001L,
+	ISOLATION_MODE_DROP		= 0x00000002L,
+};
+enum qeth_ipa_set_access_mode_rc {
+	SET_ACCESS_CTRL_RC_SUCCESS		= 0x0000,
+	SET_ACCESS_CTRL_RC_NOT_SUPPORTED	= 0x0004,
+	SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED	= 0x0008,
+	SET_ACCESS_CTRL_RC_ALREADY_ISOLATED	= 0x0010,
+	SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER	= 0x0014,
+	SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF	= 0x0018,
+};
+
 
 /* (SET)DELIP(M) IPA stuff ***************************************************/
 struct qeth_ipacmd_setdelip4 {
@@ -376,6 +391,11 @@ struct qeth_snmp_ureq {
 	struct qeth_snmp_cmd cmd;
 } __attribute__((packed));
 
+/* SET_ACCESS_CONTROL: same format for request and reply */
+struct qeth_set_access_ctrl {
+	__u32 subcmd_code;
+} __attribute__((packed));
+
 struct qeth_ipacmd_setadpparms_hdr {
 	__u32 supp_hw_cmds;
 	__u32 reserved1;
@@ -394,6 +414,7 @@ struct qeth_ipacmd_setadpparms {
 		struct qeth_query_cmds_supp query_cmds_supp;
 		struct qeth_change_addr change_addr;
 		struct qeth_snmp_cmd snmp;
+		struct qeth_set_access_ctrl set_access_ctrl;
 		__u32 mode;
 	} data;
 } __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3a..9ff2b36fdc43 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -416,7 +416,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
 		   qeth_dev_layer2_store);
 
-static ssize_t qeth_dev_large_send_show(struct device *dev,
+#define ATTR_QETH_ISOLATION_NONE	("none")
+#define ATTR_QETH_ISOLATION_FWD		("forward")
+#define ATTR_QETH_ISOLATION_DROP	("drop")
+
+static ssize_t qeth_dev_isolation_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +428,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
 	if (!card)
 		return -EINVAL;
 
-	switch (card->options.large_send) {
-	case QETH_LARGE_SEND_NO:
-		return sprintf(buf, "%s\n", "no");
-	case QETH_LARGE_SEND_TSO:
-		return sprintf(buf, "%s\n", "TSO");
+	switch (card->options.isolation) {
+	case ISOLATION_MODE_NONE:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
+	case ISOLATION_MODE_FWD:
+		return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
+	case ISOLATION_MODE_DROP:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
 	default:
-		return sprintf(buf, "%s\n", "N/A");
+		return snprintf(buf, 5, "%s\n", "N/A");
 	}
 }
 
-static ssize_t qeth_dev_large_send_store(struct device *dev,
+static ssize_t qeth_dev_isolation_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	enum qeth_large_send_types type;
+	enum qeth_ipa_isolation_modes isolation;
 	int rc = 0;
-	char *tmp;
+	char *tmp, *curtoken;
+	curtoken = (char *) buf;
 
-	if (!card)
-		return -EINVAL;
-	tmp = strsep((char **) &buf, "\n");
-	if (!strcmp(tmp, "no")) {
-		type = QETH_LARGE_SEND_NO;
-	} else if (!strcmp(tmp, "TSO")) {
-		type = QETH_LARGE_SEND_TSO;
+	if (!card) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* check for unknown, too, in case we do not yet know who we are */
+	if (card->info.type != QETH_CARD_TYPE_OSAE &&
+	    card->info.type != QETH_CARD_TYPE_UNKNOWN) {
+		rc = -EOPNOTSUPP;
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		goto out;
+	}
+
+	/* parse input into isolation mode */
+	tmp = strsep(&curtoken, "\n");
+	if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
+		isolation = ISOLATION_MODE_NONE;
+	} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
+		isolation = ISOLATION_MODE_FWD;
+	} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
+		isolation = ISOLATION_MODE_DROP;
 	} else {
-		return -EINVAL;
+		rc = -EINVAL;
+		goto out;
 	}
-	if (card->options.large_send == type)
-		return count;
-	rc = qeth_set_large_send(card, type);
-	if (rc)
-		return rc;
-	return count;
+	rc = count;
+
+	/* defer IP assist if device is offline (until discipline->set_online)*/
+	card->options.isolation = isolation;
+	if (card->state == CARD_STATE_SOFTSETUP ||
+	    card->state == CARD_STATE_UP) {
+		int ipa_rc = qeth_set_access_ctrl_online(card);
+		if (ipa_rc != 0)
+			rc = ipa_rc;
+	}
+out:
+	return rc;
 }
 
-static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
-		   qeth_dev_large_send_store);
+static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
+		   qeth_dev_isolation_store);
 
 static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
 {
@@ -582,7 +611,7 @@ static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_recover.attr,
 	&dev_attr_performance_stats.attr,
 	&dev_attr_layer2.attr,
-	&dev_attr_large_send.attr,
+	&dev_attr_isolation.attr,
 	NULL,
 };
 
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f4f3ca1393b2..0b763396d5d1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -866,7 +866,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
 	.get_link = ethtool_op_get_link,
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
-	.get_stats_count = qeth_core_get_stats_count,
+	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
 	.get_settings = qeth_core_ethtool_get_settings,
 };
@@ -874,7 +874,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
 static const struct ethtool_ops qeth_l2_osn_ops = {
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
-	.get_stats_count = qeth_core_get_stats_count,
+	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
 };
 
@@ -940,30 +940,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 
 	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
 	recover_flag = card->state;
-	rc = ccw_device_set_online(CARD_RDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_WDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-
 	rc = qeth_core_hardsetup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 
-	if (!card->dev && qeth_l2_setup_netdev(card))
+	if (!card->dev && qeth_l2_setup_netdev(card)) {
+		rc = -ENODEV;
 		goto out_remove;
+	}
 
 	if (card->info.type != QETH_CARD_TYPE_OSN)
 		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +970,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			card->lan_online = 0;
 			return 0;
 		}
+		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
 
 	if (card->info.type != QETH_CARD_TYPE_OSN) {
-		qeth_set_large_send(card, card->options.large_send);
+		/* configure isolation level */
+		qeth_set_access_ctrl_online(card);
 		qeth_l2_process_vlans(card, 0);
 	}
 
@@ -997,6 +986,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	rc = qeth_init_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +1008,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 	return 0;
+
 out_remove:
 	card->use_hard_stop = 1;
 	qeth_l2_stop_card(card, 0);
@@ -1028,7 +1019,7 @@ out_remove:
 		card->state = CARD_STATE_RECOVER;
 	else
 		card->state = CARD_STATE_DOWN;
-	return -ENODEV;
+	return rc;
 }
 
 static int qeth_l2_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba3..321988fa9f7d 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -60,5 +60,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
+int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
+int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 073b6d354915..fd1b6ed3721f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -41,6 +41,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 static int __qeth_l3_set_online(struct ccwgroup_device *, int);
 static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
 
+int qeth_l3_set_large_send(struct qeth_card *card,
+		enum qeth_large_send_types type)
+{
+	int rc = 0;
+
+	card->options.large_send = type;
+	if (card->dev == NULL)
+		return 0;
+
+	if (card->options.large_send == QETH_LARGE_SEND_TSO) {
+		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM;
+		} else {
+			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM);
+			card->options.large_send = QETH_LARGE_SEND_NO;
+			rc = -EOPNOTSUPP;
+		}
+	} else {
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM);
+		card->options.large_send = QETH_LARGE_SEND_NO;
+	}
+	return rc;
+}
 
 static int qeth_l3_isxdigit(char *buf)
 {
@@ -1439,6 +1465,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
 	return 0;
 }
 
+int qeth_l3_set_rx_csum(struct qeth_card *card,
+	enum qeth_checksum_types csum_type)
+{
+	int rc = 0;
+
+	if (card->options.checksum_type == HW_CHECKSUMMING) {
+		if ((csum_type != HW_CHECKSUMMING) &&
+			(card->state != CARD_STATE_DOWN)) {
+			rc = qeth_l3_send_simple_setassparms(card,
+				IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+			if (rc)
+				return -EIO;
+		}
+	} else {
+		if (csum_type == HW_CHECKSUMMING) {
+			if (card->state != CARD_STATE_DOWN) {
+				if (!qeth_is_supported(card,
+				    IPA_INBOUND_CHECKSUM))
+					return -EPERM;
+				rc = qeth_l3_send_checksum_command(card);
+				if (rc)
+					return -EIO;
+			}
+		}
+	}
+	card->options.checksum_type = csum_type;
+	return rc;
+}
+
 static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
 {
 	int rc = 0;
@@ -1506,6 +1561,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
 static int qeth_l3_start_ipassists(struct qeth_card *card)
 {
 	QETH_DBF_TEXT(TRACE, 3, "strtipas");
+
+	qeth_set_access_ctrl_online(card);	/* go on*/
 	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
 	qeth_l3_start_ipa_ip_fragmentation(card);	/* go on*/
 	qeth_l3_start_ipa_source_mac(card);	/* go on*/
@@ -2684,6 +2741,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
 }
 
+static inline int qeth_l3_tso_elements(struct sk_buff *skb)
+{
+	unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
+		tcp_hdr(skb)->doff * 4;
+	int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
+	int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+	elements += skb_shinfo(skb)->nr_frags;
+	return elements;
+}
+
+static inline int qeth_l3_tso_check(struct sk_buff *skb)
+{
+	int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
+		(unsigned long)skb->data;
+	return (((unsigned long)skb->data & PAGE_MASK) !=
+		(((unsigned long)skb->data + len) & PAGE_MASK));
+}
+
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int rc;
@@ -2777,16 +2852,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
-	if ((large_send == QETH_LARGE_SEND_TSO) &&
-	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
-		if (skb_linearize(new_skb))
-			goto tx_drop;
+	if (large_send == QETH_LARGE_SEND_TSO) {
+		if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
+			if (skb_linearize(new_skb))
+				goto tx_drop;
+			if (card->options.performance_stats)
+				card->perf_stats.tx_lin++;
+		}
 	}
 
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
 	    (cast_type == RTN_UNSPEC)) {
 		hdr = (struct qeth_hdr *)skb_push(new_skb,
 						sizeof(struct qeth_hdr_tso));
+		if (qeth_l3_tso_check(new_skb))
+			QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
 		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
 		qeth_tso_fill_header(card, hdr, new_skb);
@@ -2903,46 +2983,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
 static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct qeth_card *card = dev->ml_priv;
-	enum qeth_card_states old_state;
 	enum qeth_checksum_types csum_type;
 
-	if ((card->state != CARD_STATE_UP) &&
-	    (card->state != CARD_STATE_DOWN))
-		return -EPERM;
-
 	if (data)
 		csum_type = HW_CHECKSUMMING;
 	else
 		csum_type = SW_CHECKSUMMING;
 
-	if (card->options.checksum_type != csum_type) {
-		old_state = card->state;
-		if (card->state == CARD_STATE_UP)
-			__qeth_l3_set_offline(card->gdev, 1);
-		card->options.checksum_type = csum_type;
-		if (old_state == CARD_STATE_UP)
-			__qeth_l3_set_online(card->gdev, 1);
-	}
-	return 0;
+	return qeth_l3_set_rx_csum(card, csum_type);
 }
 
 static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
 {
 	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
 
 	if (data) {
-		if (card->options.large_send == QETH_LARGE_SEND_NO) {
-			if (card->info.type == QETH_CARD_TYPE_IQD)
-				return -EPERM;
-			else
-				card->options.large_send = QETH_LARGE_SEND_TSO;
-			dev->features |= NETIF_F_TSO;
-		}
+		rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
 	} else {
 		dev->features &= ~NETIF_F_TSO;
 		card->options.large_send = QETH_LARGE_SEND_NO;
 	}
-	return 0;
+	return rc;
 }
 
 static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -2957,7 +3019,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
 	.set_tso     = qeth_l3_ethtool_set_tso,
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
-	.get_stats_count = qeth_core_get_stats_count,
+	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
 	.get_settings = qeth_core_ethtool_get_settings,
 };
@@ -3058,6 +3120,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 				NETIF_F_HW_VLAN_RX |
 				NETIF_F_HW_VLAN_FILTER;
 	card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+	card->dev->gso_max_size = 15 * PAGE_SIZE;
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	return register_netdev(card->dev);
@@ -3154,32 +3217,19 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
 
 	recover_flag = card->state;
-	rc = ccw_device_set_online(CARD_RDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_WDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-
 	rc = qeth_core_hardsetup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 
 	qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
 
-	if (!card->dev && qeth_l3_setup_netdev(card))
+	if (!card->dev && qeth_l3_setup_netdev(card)) {
+		rc = -ENODEV;
 		goto out_remove;
+	}
 
 	card->state = CARD_STATE_HARDSETUP;
 	qeth_print_status_message(card);
@@ -3196,10 +3246,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			card->lan_online = 0;
 			return 0;
 		}
+		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
-	qeth_set_large_send(card, card->options.large_send);
+	qeth_l3_set_large_send(card, card->options.large_send);
 
 	rc = qeth_l3_setadapter_parms(card);
 	if (rc)
@@ -3218,6 +3269,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	rc = qeth_init_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3300,7 @@ out_remove:
 		card->state = CARD_STATE_RECOVER;
 	else
 		card->state = CARD_STATE_DOWN;
-	return -ENODEV;
+	return rc;
 }
 
 static int qeth_l3_set_online(struct ccwgroup_device *gdev)
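
The qeth_l3_tso_elements() and qeth_l3_tso_check() helpers added above are page-span arithmetic: PFN_UP() of the end address minus PFN_DOWN() of the start gives the number of 4 KiB pages, and therefore QDIO buffer elements, that the TCP payload occupies, and the transmit path now linearizes the skb when that count plus one (presumably the TSO header element) would exceed the 16-element check; the new gso_max_size of 15 * PAGE_SIZE appears to reflect the same budget. A minimal userspace sketch of the same arithmetic, with local stand-ins for the kernel's macros:

/*
 * Standalone illustration of the page-span arithmetic behind
 * qeth_l3_tso_elements() and qeth_l3_tso_check(); PAGE_SIZE, PFN_UP(),
 * PFN_DOWN() and PAGE_MASK are redefined locally for userspace.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((unsigned long)(x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((unsigned long)(x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Number of pages (buffer elements) spanned by [addr, addr + len) */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	return PFN_UP(addr + len) - PFN_DOWN(addr);
}

/* Does [addr, addr + len) cross a page boundary? (cf. qeth_l3_tso_check) */
static int crosses_page(unsigned long addr, unsigned long len)
{
	return (addr & PAGE_MASK) != ((addr + len) & PAGE_MASK);
}

int main(void)
{
	/* 6000 bytes starting 100 bytes before a page boundary touch 3 pages */
	printf("%lu\n", pages_spanned(3 * PAGE_SIZE - 100, 6000));
	/* a 60-byte span starting 20 bytes before a boundary crosses it */
	printf("%d\n", crosses_page(2 * PAGE_SIZE - 20, 60));
	return 0;
}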
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d52..3360b0941aa1 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -293,31 +293,79 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_checksum_types csum_type;
 	char *tmp;
+	int rc;
 
 	if (!card)
 		return -EINVAL;
 
-	if ((card->state != CARD_STATE_DOWN) &&
-	    (card->state != CARD_STATE_RECOVER))
-		return -EPERM;
-
 	tmp = strsep((char **) &buf, "\n");
 	if (!strcmp(tmp, "sw_checksumming"))
-		card->options.checksum_type = SW_CHECKSUMMING;
+		csum_type = SW_CHECKSUMMING;
 	else if (!strcmp(tmp, "hw_checksumming"))
-		card->options.checksum_type = HW_CHECKSUMMING;
+		csum_type = HW_CHECKSUMMING;
 	else if (!strcmp(tmp, "no_checksumming"))
-		card->options.checksum_type = NO_CHECKSUMMING;
-	else {
+		csum_type = NO_CHECKSUMMING;
+	else
 		return -EINVAL;
-	}
+
+	rc = qeth_l3_set_rx_csum(card, csum_type);
+	if (rc)
+		return rc;
 	return count;
 }
 
 static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
 		qeth_l3_dev_checksum_store);
 
+static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->options.large_send) {
+	case QETH_LARGE_SEND_NO:
+		return sprintf(buf, "%s\n", "no");
+	case QETH_LARGE_SEND_TSO:
+		return sprintf(buf, "%s\n", "TSO");
+	default:
+		return sprintf(buf, "%s\n", "N/A");
+	}
+}
+
+static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_large_send_types type;
+	int rc = 0;
+	char *tmp;
+
+	if (!card)
+		return -EINVAL;
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "no"))
+		type = QETH_LARGE_SEND_NO;
+	else if (!strcmp(tmp, "TSO"))
+		type = QETH_LARGE_SEND_TSO;
+	else
+		return -EINVAL;
+
+	if (card->options.large_send == type)
+		return count;
+	rc = qeth_l3_set_large_send(card, type);
+	if (rc)
+		return rc;
+	return count;
+}
+
+static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
+		   qeth_l3_dev_large_send_store);
+
 static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_route4.attr,
 	&dev_attr_route6.attr,
@@ -325,6 +373,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_broadcast_mode.attr,
 	&dev_attr_canonical_macaddr.attr,
 	&dev_attr_checksumming.attr,
+	&dev_attr_large_send.attr,
 	NULL,
 };
 
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 538c570df337..f1dcd7969a5c 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -551,13 +551,13 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
 	might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
 
 	/* Enable interrupts for this device. */
-	if (bus->host_pci &&
-	    ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
+	if ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE)) {
 		u32 coremask;
 
 		/* Calculate the "coremask" for the device. */
 		coremask = (1 << dev->core_index);
 
+		SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
 		err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
 		if (err)
 			goto out;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 579b114be412..5681ebed9c65 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -140,6 +140,19 @@ static void ssb_device_put(struct ssb_device *dev)
 		put_device(dev->dev);
 }
 
+static inline struct ssb_driver *ssb_driver_get(struct ssb_driver *drv)
+{
+	if (drv)
+		get_driver(&drv->drv);
+	return drv;
+}
+
+static inline void ssb_driver_put(struct ssb_driver *drv)
+{
+	if (drv)
+		put_driver(&drv->drv);
+}
+
 static int ssb_device_resume(struct device *dev)
 {
 	struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
@@ -210,90 +223,81 @@ int ssb_bus_suspend(struct ssb_bus *bus)
 EXPORT_SYMBOL(ssb_bus_suspend);
 
 #ifdef CONFIG_SSB_SPROM
-int ssb_devices_freeze(struct ssb_bus *bus)
+/** ssb_devices_freeze - Freeze all devices on the bus.
+ *
+ * After freezing no device driver will be handling a device
+ * on this bus anymore. ssb_devices_thaw() must be called after
+ * a successful freeze to reactivate the devices.
+ *
+ * @bus: The bus.
+ * @ctx: Context structure. Pass this to ssb_devices_thaw().
+ */
+int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
 {
-	struct ssb_device *dev;
-	struct ssb_driver *drv;
-	int err = 0;
-	int i;
-	pm_message_t state = PMSG_FREEZE;
+	struct ssb_device *sdev;
+	struct ssb_driver *sdrv;
+	unsigned int i;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->bus = bus;
+	SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
 
-	/* First check that we are capable to freeze all devices. */
 	for (i = 0; i < bus->nr_devices; i++) {
-		dev = &(bus->devices[i]);
-		if (!dev->dev ||
-		    !dev->dev->driver ||
-		    !device_is_registered(dev->dev))
-			continue;
-		drv = drv_to_ssb_drv(dev->dev->driver);
-		if (!drv)
+		sdev = ssb_device_get(&bus->devices[i]);
+
+		if (!sdev->dev || !sdev->dev->driver ||
+		    !device_is_registered(sdev->dev)) {
+			ssb_device_put(sdev);
 			continue;
-		if (!drv->suspend) {
-			/* Nope, can't suspend this one. */
-			return -EOPNOTSUPP;
 		}
-	}
-	/* Now suspend all devices */
-	for (i = 0; i < bus->nr_devices; i++) {
-		dev = &(bus->devices[i]);
-		if (!dev->dev ||
-		    !dev->dev->driver ||
-		    !device_is_registered(dev->dev))
-			continue;
-		drv = drv_to_ssb_drv(dev->dev->driver);
-		if (!drv)
+		sdrv = ssb_driver_get(drv_to_ssb_drv(sdev->dev->driver));
+		if (!sdrv || SSB_WARN_ON(!sdrv->remove)) {
+			ssb_device_put(sdev);
 			continue;
-		err = drv->suspend(dev, state);
-		if (err) {
-			ssb_printk(KERN_ERR PFX "Failed to freeze device %s\n",
-				   dev_name(dev->dev));
-			goto err_unwind;
 		}
+		sdrv->remove(sdev);
+		ctx->device_frozen[i] = 1;
 	}
 
 	return 0;
-err_unwind:
-	for (i--; i >= 0; i--) {
-		dev = &(bus->devices[i]);
-		if (!dev->dev ||
-		    !dev->dev->driver ||
-		    !device_is_registered(dev->dev))
-			continue;
-		drv = drv_to_ssb_drv(dev->dev->driver);
-		if (!drv)
-			continue;
-		if (drv->resume)
-			drv->resume(dev);
-	}
-	return err;
 }
 
-int ssb_devices_thaw(struct ssb_bus *bus)
+/** ssb_devices_thaw - Unfreeze all devices on the bus.
+ *
+ * This will re-attach the device drivers and re-init the devices.
+ *
+ * @ctx: The context structure from ssb_devices_freeze()
+ */
+int ssb_devices_thaw(struct ssb_freeze_context *ctx)
 {
-	struct ssb_device *dev;
-	struct ssb_driver *drv;
-	int err;
-	int i;
+	struct ssb_bus *bus = ctx->bus;
+	struct ssb_device *sdev;
+	struct ssb_driver *sdrv;
+	unsigned int i;
+	int err, result = 0;
 
 	for (i = 0; i < bus->nr_devices; i++) {
-		dev = &(bus->devices[i]);
-		if (!dev->dev ||
-		    !dev->dev->driver ||
-		    !device_is_registered(dev->dev))
+		if (!ctx->device_frozen[i])
 			continue;
-		drv = drv_to_ssb_drv(dev->dev->driver);
-		if (!drv)
+		sdev = &bus->devices[i];
+
+		if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver))
 			continue;
-		if (SSB_WARN_ON(!drv->resume))
+		sdrv = drv_to_ssb_drv(sdev->dev->driver);
+		if (SSB_WARN_ON(!sdrv || !sdrv->probe))
 			continue;
-		err = drv->resume(dev);
+
+		err = sdrv->probe(sdev, &sdev->id);
 		if (err) {
 			ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n",
-				   dev_name(dev->dev));
+				   dev_name(sdev->dev));
+			result = err;
 		}
+		ssb_driver_put(sdrv);
+		ssb_device_put(sdev);
 	}
 
-	return 0;
+	return result;
 }
 #endif /* CONFIG_SSB_SPROM */
 
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index e8b89e8ac9bd..0d6c0280eb34 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -354,7 +354,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
 		dev->bus = bus;
 		dev->ops = bus->ops;
 
-		ssb_dprintk(KERN_INFO PFX
+		printk(KERN_DEBUG PFX
 			    "Core %d found: %s "
 			    "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n",
 			    i, ssb_core_name(dev->id.coreid),
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 8943015a3eef..d0e6762fec50 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -13,6 +13,8 @@
 
 #include "ssb_private.h"
 
+#include <linux/ctype.h>
+
 
 static const struct ssb_sprom *fallback_sprom;
 
@@ -33,17 +35,27 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
 static int hex2sprom(u16 *sprom, const char *dump, size_t len,
 		     size_t sprom_size_words)
 {
-	char tmp[5] = { 0 };
-	int cnt = 0;
+	char c, tmp[5] = { 0 };
+	int err, cnt = 0;
 	unsigned long parsed;
 
-	if (len < sprom_size_words * 2)
+	/* Strip whitespace at the end. */
+	while (len) {
+		c = dump[len - 1];
+		if (!isspace(c) && c != '\0')
+			break;
+		len--;
+	}
+	/* Length must match exactly. */
+	if (len != sprom_size_words * 4)
 		return -EINVAL;
 
 	while (cnt < sprom_size_words) {
 		memcpy(tmp, dump, 4);
 		dump += 4;
-		parsed = simple_strtoul(tmp, NULL, 16);
+		err = strict_strtoul(tmp, 16, &parsed);
+		if (err)
+			return err;
 		sprom[cnt++] = swab16((u16)parsed);
 	}
 
@@ -90,6 +102,7 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
 	u16 *sprom;
 	int res = 0, err = -ENOMEM;
 	size_t sprom_size_words = bus->sprom_size;
+	struct ssb_freeze_context freeze;
 
 	sprom = kcalloc(bus->sprom_size, sizeof(u16), GFP_KERNEL);
 	if (!sprom)
@@ -111,18 +124,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
 	err = -ERESTARTSYS;
 	if (mutex_lock_interruptible(&bus->sprom_mutex))
 		goto out_kfree;
-	err = ssb_devices_freeze(bus);
-	if (err == -EOPNOTSUPP) {
-		ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze devices. "
-			   "No suspend support. Is CONFIG_PM enabled?\n");
-		goto out_unlock;
-	}
+	err = ssb_devices_freeze(bus, &freeze);
 	if (err) {
 		ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n");
 		goto out_unlock;
 	}
 	res = sprom_write(bus, sprom);
-	err = ssb_devices_thaw(bus);
+	err = ssb_devices_thaw(&freeze);
 	if (err)
 		ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n");
 out_unlock:
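
The reworked hex2sprom() above is also stricter about its input: trailing whitespace is trimmed first, the remaining length must be exactly four hex digits per 16-bit SPROM word, and each 4-character group has to parse cleanly through strict_strtoul(). A small userspace sketch of a dump in that format (the word values are made up):

/*
 * Prints an SPROM image as the 4-hex-digits-per-word string the sysfs
 * store side now insists on: no separators, at most trailing whitespace.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void print_sprom_dump(const uint16_t *words, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		printf("%04x", (unsigned int)words[i]);	/* 4 hex chars per word */
	putchar('\n');					/* trailing '\n' is tolerated */
}

int main(void)
{
	uint16_t fake_sprom[4] = { 0x5372, 0x0001, 0xabcd, 0xffff };

	print_sprom_dump(fake_sprom, 4);	/* prints "53720001abcdffff" */
	return 0;
}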
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 25433565dfda..56054be4d113 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -176,13 +176,21 @@ extern const struct ssb_sprom *ssb_get_fallback_sprom(void);
 
 /* core.c */
 extern u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m);
-extern int ssb_devices_freeze(struct ssb_bus *bus);
-extern int ssb_devices_thaw(struct ssb_bus *bus);
 extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev);
 int ssb_for_each_bus_call(unsigned long data,
 			  int (*func)(struct ssb_bus *bus, unsigned long data));
 extern struct ssb_bus *ssb_pcmcia_dev_to_bus(struct pcmcia_device *pdev);
 
+struct ssb_freeze_context {
+	/* Pointer to the bus */
+	struct ssb_bus *bus;
+	/* Boolean list to indicate whether a device is frozen on this bus. */
+	bool device_frozen[SSB_MAX_NR_CORES];
+};
+extern int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx);
+extern int ssb_devices_thaw(struct ssb_freeze_context *ctx);
+
+
 
 /* b43_pci_bridge.c */
 #ifdef CONFIG_SSB_B43_PCI_BRIDGE
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d21b3469f6d7..dfcd75cf4907 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,5 +125,13 @@ source "drivers/staging/sep/Kconfig"
 
 source "drivers/staging/iio/Kconfig"
 
+source "drivers/staging/strip/Kconfig"
+
+source "drivers/staging/arlan/Kconfig"
+
+source "drivers/staging/wavelan/Kconfig"
+
+source "drivers/staging/netwave/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 8cbf1aebea2e..7719d04a4a86 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,8 @@ obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_RAR_REGISTER)	+= rar/
 obj-$(CONFIG_DX_SEP)		+= sep/
 obj-$(CONFIG_IIO)		+= iio/
+obj-$(CONFIG_STRIP)		+= strip/
+obj-$(CONFIG_ARLAN)		+= arlan/
+obj-$(CONFIG_WAVELAN)		+= wavelan/
+obj-$(CONFIG_PCMCIA_WAVELAN)	+= wavelan/
+obj-$(CONFIG_PCMCIA_NETWAVE)	+= netwave/
diff --git a/drivers/staging/arlan/Kconfig b/drivers/staging/arlan/Kconfig
new file mode 100644
index 000000000000..5e42b81f97b0
--- /dev/null
+++ b/drivers/staging/arlan/Kconfig
@@ -0,0 +1,15 @@
+config ARLAN
+	tristate "Aironet Arlan 655 & IC2200 DS support"
+	depends on ISA && !64BIT && WLAN
+	select WIRELESS_EXT
+	---help---
+	  Aironet makes Arlan, a class of wireless LAN adapters. These use the
+	  www.Telxon.com chip, which is also used on several similar cards.
+	  This driver is tested on the 655 and IC2200 series cards. Look at
+	  <http://www.ylenurme.ee/~elmer/655/> for the latest information.
+
+	  The driver is built as two modules, arlan and arlan-proc. The latter
+	  is the /proc interface and is not needed most of the time.
+
+	  On some computers the card ends up in a non-valid state after some
+	  time. Use a ping-reset script to clear it.
diff --git a/drivers/staging/arlan/Makefile b/drivers/staging/arlan/Makefile
new file mode 100644
index 000000000000..9e58e5fae7b9
--- /dev/null
+++ b/drivers/staging/arlan/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ARLAN) += arlan.o 
+
+arlan-objs := arlan-main.o arlan-proc.o
diff --git a/drivers/staging/arlan/TODO b/drivers/staging/arlan/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/arlan/TODO
@@ -0,0 +1,7 @@
+TODO:
+	- step up and maintain this driver to ensure that it continues
+	  to work.  Having the hardware for this is pretty much a
+	  requirement.  If this does not happen, this driver will be removed in
+	  the 2.6.35 kernel release.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 921a082487a1..921a082487a1 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/staging/arlan/arlan-proc.c
index b22983e6c0cf..b22983e6c0cf 100644
--- a/drivers/net/wireless/arlan-proc.c
+++ b/drivers/staging/arlan/arlan-proc.c
diff --git a/drivers/net/wireless/arlan.h b/drivers/staging/arlan/arlan.h
index fb3ad51a1caf..fb3ad51a1caf 100644
--- a/drivers/net/wireless/arlan.h
+++ b/drivers/staging/arlan/arlan.h
diff --git a/drivers/staging/netwave/Kconfig b/drivers/staging/netwave/Kconfig
new file mode 100644
index 000000000000..8033e8171f9e
--- /dev/null
+++ b/drivers/staging/netwave/Kconfig
@@ -0,0 +1,11 @@
+config PCMCIA_NETWAVE
+	tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
+	depends on PCMCIA && WLAN
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	help
+	  Say Y here if you intend to attach this type of PCMCIA (PC-card)
+	  wireless Ethernet networking card to your computer.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called netwave_cs.  If unsure, say N.
diff --git a/drivers/staging/netwave/Makefile b/drivers/staging/netwave/Makefile
new file mode 100644
index 000000000000..2ab89de59b9b
--- /dev/null
+++ b/drivers/staging/netwave/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCMCIA_NETWAVE)	+= netwave_cs.o
diff --git a/drivers/staging/netwave/TODO b/drivers/staging/netwave/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/netwave/TODO
@@ -0,0 +1,7 @@
+TODO:
+	- step up and maintain this driver to ensure that it continues
+	  to work.  Having the hardware for this is pretty much a
+	  requirement.  If this does not happen, this driver will be removed in
+	  the 2.6.35 kernel release.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index e61e6b9440ab..e61e6b9440ab 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 203c79b8180f..3211dd3765a0 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -1,6 +1,7 @@
 config RTL8187SE
 	tristate "RealTek RTL8187SE Wireless LAN NIC driver"
 	depends on PCI && WLAN
-	depends on WIRELESS_EXT
+	select WIRELESS_EXT
+	select WEXT_PRIV
 	default N
 	---help---
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 37e4fde45073..2ae3745f775f 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,6 +1,7 @@
 config RTL8192E
 	tristate "RealTek RTL8192E Wireless LAN NIC driver"
 	depends on PCI && WLAN
-	depends on WIRELESS_EXT
+	select WIRELESS_EXT
+	select WEXT_PRIV
 	default N
 	---help---
diff --git a/drivers/staging/strip/Kconfig b/drivers/staging/strip/Kconfig
new file mode 100644
index 000000000000..36257b5cd6e1
--- /dev/null
+++ b/drivers/staging/strip/Kconfig
@@ -0,0 +1,22 @@
+config STRIP
+	tristate "STRIP (Metricom starmode radio IP)"
+	depends on INET
+	select WIRELESS_EXT
+	---help---
+	  Say Y if you have a Metricom radio and intend to use Starmode Radio
+	  IP. STRIP is a radio protocol developed for the MosquitoNet project
+	  to send Internet traffic using Metricom radios.  Metricom radios are
+	  small, battery powered, 100kbit/sec packet radio transceivers, about
+	  the size and weight of a cellular telephone. (You may also have heard
+	  them called "Metricom modems" but we avoid the term "modem" because
+	  it misleads many people into thinking that you can plug a Metricom
+	  modem into a phone line and use it as a modem.)
+
+	  You can use STRIP on any Linux machine with a serial port, although
+	  it is obviously most useful for people with laptop computers. If you
+	  think you might get a Metricom radio in the future, there is no harm
+	  in saying Y to STRIP now, except that it makes the kernel a bit
+	  bigger.
+
+	  To compile this as a module, choose M here: the module will be
+	  called strip.
diff --git a/drivers/staging/strip/Makefile b/drivers/staging/strip/Makefile
new file mode 100644
index 000000000000..6417bdcac2fb
--- /dev/null
+++ b/drivers/staging/strip/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_STRIP) += strip.o
diff --git a/drivers/staging/strip/TODO b/drivers/staging/strip/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/strip/TODO
@@ -0,0 +1,7 @@
+TODO:
+	- step up and maintain this driver to ensure that it continues
+	  to work.  Having the hardware for this is pretty much a
+	  requirement.  If this does not happen, this driver will be removed in
+	  the 2.6.35 kernel release.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/strip.c b/drivers/staging/strip/strip.c
index ea6a87c19319..698aade79d40 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/staging/strip/strip.c
@@ -106,6 +106,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE";
 #include <linux/serial.h>
 #include <linux/serialP.h>
 #include <linux/rcupdate.h>
+#include <linux/compat.h>
 #include <net/arp.h>
 #include <net/net_namespace.h>
 
@@ -2725,6 +2726,19 @@ static int strip_ioctl(struct tty_struct *tty, struct file *file,
 	return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long strip_compat_ioctl(struct tty_struct *tty, struct file *file,
+		       unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCGIFNAME:
+	case SIOCSIFHWADDR:
+		return strip_ioctl(tty, file, cmd,
+			(unsigned long)compat_ptr(arg));
+	}
+	return -ENOIOCTLCMD;
+}
+#endif
 
 /************************************************************************/
 /* Initialization							*/
@@ -2736,6 +2750,9 @@ static struct tty_ldisc_ops strip_ldisc = {
 	.open = strip_open,
 	.close = strip_close,
 	.ioctl = strip_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = strip_compat_ioctl,
+#endif
 	.receive_buf = strip_receive_buf,
 	.write_wakeup = strip_write_some_more,
 };
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 9bec95adcce2..825bbc4fc3fa 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,8 @@
 config VT6655
    tristate "VIA Technologies VT6655 support"
-   depends on WIRELESS_EXT && PCI
+   depends on PCI
+   select WIRELESS_EXT
+   select WEXT_PRIV
    ---help---
    This is a vendor-written driver for VIA VT6655.
 
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index 3165f2c42079..87bcd269310c 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,6 +1,8 @@
 config VT6656
 	tristate "VIA Technologies VT6656 support"
-	depends on WIRELESS_EXT && USB
+	depends on USB
+	select WIRELESS_EXT
+	select WEXT_PRIV
 	---help---
 	This is a vendor-written driver for VIA VT6656.
 
diff --git a/drivers/staging/wavelan/Kconfig b/drivers/staging/wavelan/Kconfig
new file mode 100644
index 000000000000..af655668c2a7
--- /dev/null
+++ b/drivers/staging/wavelan/Kconfig
@@ -0,0 +1,38 @@
+config WAVELAN
+	tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
+	depends on ISA && WLAN
+	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
+	---help---
+	  The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
+	  a Radio LAN (wireless Ethernet-like Local Area Network) using the
+	  radio frequencies 900 MHz and 2.4 GHz.
+
+	  If you want to use an ISA WaveLAN card under Linux, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>. Some more specific
+	  information is contained in
+	  <file:Documentation/networking/wavelan.txt> and in the source code
+	  <file:drivers/net/wireless/wavelan.p.h>.
+
+	  You will also need the wireless tools package available from
+	  <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+	  Please read the man pages contained therein.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called wavelan.
+
+config PCMCIA_WAVELAN
+	tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
+	depends on PCMCIA && WLAN
+	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
+	help
+	  Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
+	  (PC-card) wireless Ethernet networking card to your computer.  This
+	  driver is for the non-IEEE-802.11 Wavelan cards.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called wavelan_cs.  If unsure, say N.
diff --git a/drivers/staging/wavelan/Makefile b/drivers/staging/wavelan/Makefile
new file mode 100644
index 000000000000..1cde17c69a43
--- /dev/null
+++ b/drivers/staging/wavelan/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_WAVELAN)		+= wavelan.o
+obj-$(CONFIG_PCMCIA_WAVELAN)	+= wavelan_cs.o
diff --git a/drivers/staging/wavelan/TODO b/drivers/staging/wavelan/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/wavelan/TODO
@@ -0,0 +1,7 @@
+TODO:
+	- step up and maintain this driver to ensure that it continues
+	  to work.  Having the hardware for this is pretty much a
+	  requirement.  If this does not happen, this driver will be removed in
+	  the 2.6.35 kernel release.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/i82586.h b/drivers/staging/wavelan/i82586.h
index 5f65b250646f..5f65b250646f 100644
--- a/drivers/net/wireless/i82586.h
+++ b/drivers/staging/wavelan/i82586.h
diff --git a/drivers/net/wireless/wavelan.c b/drivers/staging/wavelan/wavelan.c
index d634b2da3b84..d634b2da3b84 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
diff --git a/drivers/net/wireless/wavelan.h b/drivers/staging/wavelan/wavelan.h
index 9ab360558ffd..9ab360558ffd 100644
--- a/drivers/net/wireless/wavelan.h
+++ b/drivers/staging/wavelan/wavelan.h
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/staging/wavelan/wavelan.p.h
index dbe8de6e5f52..dbe8de6e5f52 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/staging/wavelan/wavelan.p.h
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 33918fd5b231..33918fd5b231 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
diff --git a/drivers/net/wireless/wavelan_cs.h b/drivers/staging/wavelan/wavelan_cs.h
index 2e4bfe4147c6..2e4bfe4147c6 100644
--- a/drivers/net/wireless/wavelan_cs.h
+++ b/drivers/staging/wavelan/wavelan_cs.h
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/staging/wavelan/wavelan_cs.p.h
index 81d91531c4f9..8fbfaa8a5a67 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/staging/wavelan/wavelan_cs.p.h
@@ -446,7 +446,7 @@
 #include <pcmcia/ds.h>
 
 /* Wavelan declarations */
-#include "i82593.h"	/* Definitions for the Intel chip */
+#include <linux/i82593.h>	/* Definitions for the Intel chip */
 
 #include "wavelan_cs.h"	/* Others bits of the hardware */